]> git.proxmox.com Git - mirror_qemu.git/blob - linux-user/syscall.c
virtfs-proxy-helper: use setresuid and setresgid
[mirror_qemu.git] / linux-user / syscall.c
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include <linux/icmp.h>
64 #include "qemu-common.h"
65 #ifdef TARGET_GPROF
66 #include <sys/gmon.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu-xattr.h"
76 #endif
77
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/utsname.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
91 #include <linux/kd.h>
92 #include <linux/mtio.h>
93 #include <linux/fs.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
96 #endif
97 #include <linux/fb.h>
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include "linux_loop.h"
101 #include "cpu-uname.h"
102
103 #include "qemu.h"
104
105 #if defined(CONFIG_USE_NPTL)
106 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
107 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
108 #else
109 /* XXX: Hardcode the above values. */
110 #define CLONE_NPTL_FLAGS2 0
111 #endif
112
113 //#define DEBUG
114
115 //#include <linux/msdos_fs.h>
116 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
117 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
118
119
120 #undef _syscall0
121 #undef _syscall1
122 #undef _syscall2
123 #undef _syscall3
124 #undef _syscall4
125 #undef _syscall5
126 #undef _syscall6
127
128 #define _syscall0(type,name) \
129 static type name (void) \
130 { \
131 return syscall(__NR_##name); \
132 }
133
134 #define _syscall1(type,name,type1,arg1) \
135 static type name (type1 arg1) \
136 { \
137 return syscall(__NR_##name, arg1); \
138 }
139
140 #define _syscall2(type,name,type1,arg1,type2,arg2) \
141 static type name (type1 arg1,type2 arg2) \
142 { \
143 return syscall(__NR_##name, arg1, arg2); \
144 }
145
146 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
147 static type name (type1 arg1,type2 arg2,type3 arg3) \
148 { \
149 return syscall(__NR_##name, arg1, arg2, arg3); \
150 }
151
152 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
153 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
154 { \
155 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
156 }
157
158 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
159 type5,arg5) \
160 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
161 { \
162 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
163 }
164
165
166 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
167 type5,arg5,type6,arg6) \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
169 type6 arg6) \
170 { \
171 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
172 }
173
174
175 #define __NR_sys_uname __NR_uname
176 #define __NR_sys_faccessat __NR_faccessat
177 #define __NR_sys_fchmodat __NR_fchmodat
178 #define __NR_sys_fchownat __NR_fchownat
179 #define __NR_sys_fstatat64 __NR_fstatat64
180 #define __NR_sys_futimesat __NR_futimesat
181 #define __NR_sys_getcwd1 __NR_getcwd
182 #define __NR_sys_getdents __NR_getdents
183 #define __NR_sys_getdents64 __NR_getdents64
184 #define __NR_sys_getpriority __NR_getpriority
185 #define __NR_sys_linkat __NR_linkat
186 #define __NR_sys_mkdirat __NR_mkdirat
187 #define __NR_sys_mknodat __NR_mknodat
188 #define __NR_sys_newfstatat __NR_newfstatat
189 #define __NR_sys_openat __NR_openat
190 #define __NR_sys_readlinkat __NR_readlinkat
191 #define __NR_sys_renameat __NR_renameat
192 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
193 #define __NR_sys_symlinkat __NR_symlinkat
194 #define __NR_sys_syslog __NR_syslog
195 #define __NR_sys_tgkill __NR_tgkill
196 #define __NR_sys_tkill __NR_tkill
197 #define __NR_sys_unlinkat __NR_unlinkat
198 #define __NR_sys_utimensat __NR_utimensat
199 #define __NR_sys_futex __NR_futex
200 #define __NR_sys_inotify_init __NR_inotify_init
201 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
202 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
203
204 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
205 defined(__s390x__)
206 #define __NR__llseek __NR_lseek
207 #endif
208
209 #ifdef __NR_gettid
210 _syscall0(int, gettid)
211 #else
212 /* This is a replacement for the host gettid() and must return a host
213 errno. */
214 static int gettid(void) {
215 return -ENOSYS;
216 }
217 #endif
218 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
219 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
220 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
221 #endif
222 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
223 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
224 loff_t *, res, uint, wh);
225 #endif
226 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
227 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
228 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
229 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
230 #endif
231 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
232 _syscall2(int,sys_tkill,int,tid,int,sig)
233 #endif
234 #ifdef __NR_exit_group
235 _syscall1(int,exit_group,int,error_code)
236 #endif
237 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
238 _syscall1(int,set_tid_address,int *,tidptr)
239 #endif
240 #if defined(CONFIG_USE_NPTL)
241 #if defined(TARGET_NR_futex) && defined(__NR_futex)
242 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
243 const struct timespec *,timeout,int *,uaddr2,int,val3)
244 #endif
245 #endif
246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
247 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
250 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
251 unsigned long *, user_mask_ptr);
252 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
253 void *, arg);
254
255 static bitmask_transtbl fcntl_flags_tbl[] = {
256 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
257 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
258 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
259 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
260 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
261 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
262 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
263 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
264 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
265 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
266 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
267 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
268 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
269 #if defined(O_DIRECT)
270 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
271 #endif
272 #if defined(O_NOATIME)
273 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
274 #endif
275 #if defined(O_CLOEXEC)
276 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
277 #endif
278 #if defined(O_PATH)
279 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
280 #endif
281 /* Don't terminate the list prematurely on 64-bit host+guest. */
282 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
283 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
284 #endif
285 { 0, 0, 0, 0 }
286 };
287
288 #define COPY_UTSNAME_FIELD(dest, src) \
289 do { \
290 /* __NEW_UTS_LEN doesn't include terminating null */ \
291 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
292 (dest)[__NEW_UTS_LEN] = '\0'; \
293 } while (0)
294
295 static int sys_uname(struct new_utsname *buf)
296 {
297 struct utsname uts_buf;
298
299 if (uname(&uts_buf) < 0)
300 return (-1);
301
302 /*
303 * Just in case these have some differences, we
304 * translate utsname to new_utsname (which is the
305 * struct linux kernel uses).
306 */
307
308 memset(buf, 0, sizeof(*buf));
309 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
310 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
311 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
312 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
313 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
314 #ifdef _GNU_SOURCE
315 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
316 #endif
317 return (0);
318
319 #undef COPY_UTSNAME_FIELD
320 }
321
322 static int sys_getcwd1(char *buf, size_t size)
323 {
324 if (getcwd(buf, size) == NULL) {
325 /* getcwd() sets errno */
326 return (-1);
327 }
328 return strlen(buf)+1;
329 }
330
331 #ifdef CONFIG_ATFILE
332 /*
333 * Host system seems to have atfile syscall stubs available. We
334 * now enable them one by one as specified by target syscall_nr.h.
335 */
336
337 #ifdef TARGET_NR_faccessat
338 static int sys_faccessat(int dirfd, const char *pathname, int mode)
339 {
340 return (faccessat(dirfd, pathname, mode, 0));
341 }
342 #endif
343 #ifdef TARGET_NR_fchmodat
344 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
345 {
346 return (fchmodat(dirfd, pathname, mode, 0));
347 }
348 #endif
349 #if defined(TARGET_NR_fchownat)
350 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
351 gid_t group, int flags)
352 {
353 return (fchownat(dirfd, pathname, owner, group, flags));
354 }
355 #endif
356 #ifdef __NR_fstatat64
357 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
358 int flags)
359 {
360 return (fstatat(dirfd, pathname, buf, flags));
361 }
362 #endif
363 #ifdef __NR_newfstatat
364 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
365 int flags)
366 {
367 return (fstatat(dirfd, pathname, buf, flags));
368 }
369 #endif
370 #ifdef TARGET_NR_futimesat
371 static int sys_futimesat(int dirfd, const char *pathname,
372 const struct timeval times[2])
373 {
374 return (futimesat(dirfd, pathname, times));
375 }
376 #endif
377 #ifdef TARGET_NR_linkat
378 static int sys_linkat(int olddirfd, const char *oldpath,
379 int newdirfd, const char *newpath, int flags)
380 {
381 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
382 }
383 #endif
384 #ifdef TARGET_NR_mkdirat
385 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
386 {
387 return (mkdirat(dirfd, pathname, mode));
388 }
389 #endif
390 #ifdef TARGET_NR_mknodat
391 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
392 dev_t dev)
393 {
394 return (mknodat(dirfd, pathname, mode, dev));
395 }
396 #endif
397 #ifdef TARGET_NR_openat
398 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
399 {
400 /*
401 * open(2) has extra parameter 'mode' when called with
402 * flag O_CREAT.
403 */
404 if ((flags & O_CREAT) != 0) {
405 return (openat(dirfd, pathname, flags, mode));
406 }
407 return (openat(dirfd, pathname, flags));
408 }
409 #endif
410 #ifdef TARGET_NR_readlinkat
411 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
412 {
413 return (readlinkat(dirfd, pathname, buf, bufsiz));
414 }
415 #endif
416 #ifdef TARGET_NR_renameat
417 static int sys_renameat(int olddirfd, const char *oldpath,
418 int newdirfd, const char *newpath)
419 {
420 return (renameat(olddirfd, oldpath, newdirfd, newpath));
421 }
422 #endif
423 #ifdef TARGET_NR_symlinkat
424 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
425 {
426 return (symlinkat(oldpath, newdirfd, newpath));
427 }
428 #endif
429 #ifdef TARGET_NR_unlinkat
430 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
431 {
432 return (unlinkat(dirfd, pathname, flags));
433 }
434 #endif
435 #else /* !CONFIG_ATFILE */
436
437 /*
438 * Try direct syscalls instead
439 */
440 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
441 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
442 #endif
443 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
444 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
445 #endif
446 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
447 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
448 uid_t,owner,gid_t,group,int,flags)
449 #endif
450 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
451 defined(__NR_fstatat64)
452 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
453 struct stat *,buf,int,flags)
454 #endif
455 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
456 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
457 const struct timeval *,times)
458 #endif
459 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
460 defined(__NR_newfstatat)
461 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
462 struct stat *,buf,int,flags)
463 #endif
464 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
465 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
466 int,newdirfd,const char *,newpath,int,flags)
467 #endif
468 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
469 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
470 #endif
471 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
472 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
473 mode_t,mode,dev_t,dev)
474 #endif
475 #if defined(TARGET_NR_openat) && defined(__NR_openat)
476 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
477 #endif
478 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
479 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
480 char *,buf,size_t,bufsize)
481 #endif
482 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
483 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
484 int,newdirfd,const char *,newpath)
485 #endif
486 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
487 _syscall3(int,sys_symlinkat,const char *,oldpath,
488 int,newdirfd,const char *,newpath)
489 #endif
490 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
491 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
492 #endif
493
494 #endif /* CONFIG_ATFILE */
495
496 #ifdef CONFIG_UTIMENSAT
497 static int sys_utimensat(int dirfd, const char *pathname,
498 const struct timespec times[2], int flags)
499 {
500 if (pathname == NULL)
501 return futimens(dirfd, times);
502 else
503 return utimensat(dirfd, pathname, times, flags);
504 }
505 #else
506 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
507 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
508 const struct timespec *,tsp,int,flags)
509 #endif
510 #endif /* CONFIG_UTIMENSAT */
511
512 #ifdef CONFIG_INOTIFY
513 #include <sys/inotify.h>
514
515 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
516 static int sys_inotify_init(void)
517 {
518 return (inotify_init());
519 }
520 #endif
521 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
522 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
523 {
524 return (inotify_add_watch(fd, pathname, mask));
525 }
526 #endif
527 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
528 static int sys_inotify_rm_watch(int fd, int32_t wd)
529 {
530 return (inotify_rm_watch(fd, wd));
531 }
532 #endif
533 #ifdef CONFIG_INOTIFY1
534 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
535 static int sys_inotify_init1(int flags)
536 {
537 return (inotify_init1(flags));
538 }
539 #endif
540 #endif
541 #else
542 /* Userspace can usually survive runtime without inotify */
543 #undef TARGET_NR_inotify_init
544 #undef TARGET_NR_inotify_init1
545 #undef TARGET_NR_inotify_add_watch
546 #undef TARGET_NR_inotify_rm_watch
547 #endif /* CONFIG_INOTIFY */
548
549 #if defined(TARGET_NR_ppoll)
550 #ifndef __NR_ppoll
551 # define __NR_ppoll -1
552 #endif
553 #define __NR_sys_ppoll __NR_ppoll
554 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
555 struct timespec *, timeout, const __sigset_t *, sigmask,
556 size_t, sigsetsize)
557 #endif
558
559 #if defined(TARGET_NR_pselect6)
560 #ifndef __NR_pselect6
561 # define __NR_pselect6 -1
562 #endif
563 #define __NR_sys_pselect6 __NR_pselect6
564 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
565 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
566 #endif
567
568 #if defined(TARGET_NR_prlimit64)
569 #ifndef __NR_prlimit64
570 # define __NR_prlimit64 -1
571 #endif
572 #define __NR_sys_prlimit64 __NR_prlimit64
573 /* The glibc rlimit structure may not be that used by the underlying syscall */
574 struct host_rlimit64 {
575 uint64_t rlim_cur;
576 uint64_t rlim_max;
577 };
578 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
579 const struct host_rlimit64 *, new_limit,
580 struct host_rlimit64 *, old_limit)
581 #endif
582
583 extern int personality(int);
584 extern int flock(int, int);
585 extern int setfsuid(int);
586 extern int setfsgid(int);
587 extern int setgroups(int, gid_t *);
588
589 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
590 #ifdef TARGET_ARM
591 static inline int regpairs_aligned(void *cpu_env) {
592 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
593 }
594 #elif defined(TARGET_MIPS)
595 static inline int regpairs_aligned(void *cpu_env) { return 1; }
596 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
597 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
598 * of registers which translates to the same as ARM/MIPS, because we start with
599 * r3 as arg1 */
600 static inline int regpairs_aligned(void *cpu_env) { return 1; }
601 #else
602 static inline int regpairs_aligned(void *cpu_env) { return 0; }
603 #endif
604
605 #define ERRNO_TABLE_SIZE 1200
606
607 /* target_to_host_errno_table[] is initialized from
608 * host_to_target_errno_table[] in syscall_init(). */
609 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
610 };
611
612 /*
613 * This list is the union of errno values overridden in asm-<arch>/errno.h
614 * minus the errnos that are not actually generic to all archs.
615 */
616 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
617 [EIDRM] = TARGET_EIDRM,
618 [ECHRNG] = TARGET_ECHRNG,
619 [EL2NSYNC] = TARGET_EL2NSYNC,
620 [EL3HLT] = TARGET_EL3HLT,
621 [EL3RST] = TARGET_EL3RST,
622 [ELNRNG] = TARGET_ELNRNG,
623 [EUNATCH] = TARGET_EUNATCH,
624 [ENOCSI] = TARGET_ENOCSI,
625 [EL2HLT] = TARGET_EL2HLT,
626 [EDEADLK] = TARGET_EDEADLK,
627 [ENOLCK] = TARGET_ENOLCK,
628 [EBADE] = TARGET_EBADE,
629 [EBADR] = TARGET_EBADR,
630 [EXFULL] = TARGET_EXFULL,
631 [ENOANO] = TARGET_ENOANO,
632 [EBADRQC] = TARGET_EBADRQC,
633 [EBADSLT] = TARGET_EBADSLT,
634 [EBFONT] = TARGET_EBFONT,
635 [ENOSTR] = TARGET_ENOSTR,
636 [ENODATA] = TARGET_ENODATA,
637 [ETIME] = TARGET_ETIME,
638 [ENOSR] = TARGET_ENOSR,
639 [ENONET] = TARGET_ENONET,
640 [ENOPKG] = TARGET_ENOPKG,
641 [EREMOTE] = TARGET_EREMOTE,
642 [ENOLINK] = TARGET_ENOLINK,
643 [EADV] = TARGET_EADV,
644 [ESRMNT] = TARGET_ESRMNT,
645 [ECOMM] = TARGET_ECOMM,
646 [EPROTO] = TARGET_EPROTO,
647 [EDOTDOT] = TARGET_EDOTDOT,
648 [EMULTIHOP] = TARGET_EMULTIHOP,
649 [EBADMSG] = TARGET_EBADMSG,
650 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
651 [EOVERFLOW] = TARGET_EOVERFLOW,
652 [ENOTUNIQ] = TARGET_ENOTUNIQ,
653 [EBADFD] = TARGET_EBADFD,
654 [EREMCHG] = TARGET_EREMCHG,
655 [ELIBACC] = TARGET_ELIBACC,
656 [ELIBBAD] = TARGET_ELIBBAD,
657 [ELIBSCN] = TARGET_ELIBSCN,
658 [ELIBMAX] = TARGET_ELIBMAX,
659 [ELIBEXEC] = TARGET_ELIBEXEC,
660 [EILSEQ] = TARGET_EILSEQ,
661 [ENOSYS] = TARGET_ENOSYS,
662 [ELOOP] = TARGET_ELOOP,
663 [ERESTART] = TARGET_ERESTART,
664 [ESTRPIPE] = TARGET_ESTRPIPE,
665 [ENOTEMPTY] = TARGET_ENOTEMPTY,
666 [EUSERS] = TARGET_EUSERS,
667 [ENOTSOCK] = TARGET_ENOTSOCK,
668 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
669 [EMSGSIZE] = TARGET_EMSGSIZE,
670 [EPROTOTYPE] = TARGET_EPROTOTYPE,
671 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
672 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
673 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
674 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
675 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
676 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
677 [EADDRINUSE] = TARGET_EADDRINUSE,
678 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
679 [ENETDOWN] = TARGET_ENETDOWN,
680 [ENETUNREACH] = TARGET_ENETUNREACH,
681 [ENETRESET] = TARGET_ENETRESET,
682 [ECONNABORTED] = TARGET_ECONNABORTED,
683 [ECONNRESET] = TARGET_ECONNRESET,
684 [ENOBUFS] = TARGET_ENOBUFS,
685 [EISCONN] = TARGET_EISCONN,
686 [ENOTCONN] = TARGET_ENOTCONN,
687 [EUCLEAN] = TARGET_EUCLEAN,
688 [ENOTNAM] = TARGET_ENOTNAM,
689 [ENAVAIL] = TARGET_ENAVAIL,
690 [EISNAM] = TARGET_EISNAM,
691 [EREMOTEIO] = TARGET_EREMOTEIO,
692 [ESHUTDOWN] = TARGET_ESHUTDOWN,
693 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
694 [ETIMEDOUT] = TARGET_ETIMEDOUT,
695 [ECONNREFUSED] = TARGET_ECONNREFUSED,
696 [EHOSTDOWN] = TARGET_EHOSTDOWN,
697 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
698 [EALREADY] = TARGET_EALREADY,
699 [EINPROGRESS] = TARGET_EINPROGRESS,
700 [ESTALE] = TARGET_ESTALE,
701 [ECANCELED] = TARGET_ECANCELED,
702 [ENOMEDIUM] = TARGET_ENOMEDIUM,
703 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
704 #ifdef ENOKEY
705 [ENOKEY] = TARGET_ENOKEY,
706 #endif
707 #ifdef EKEYEXPIRED
708 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
709 #endif
710 #ifdef EKEYREVOKED
711 [EKEYREVOKED] = TARGET_EKEYREVOKED,
712 #endif
713 #ifdef EKEYREJECTED
714 [EKEYREJECTED] = TARGET_EKEYREJECTED,
715 #endif
716 #ifdef EOWNERDEAD
717 [EOWNERDEAD] = TARGET_EOWNERDEAD,
718 #endif
719 #ifdef ENOTRECOVERABLE
720 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
721 #endif
722 };
723
724 static inline int host_to_target_errno(int err)
725 {
726 if(host_to_target_errno_table[err])
727 return host_to_target_errno_table[err];
728 return err;
729 }
730
731 static inline int target_to_host_errno(int err)
732 {
733 if (target_to_host_errno_table[err])
734 return target_to_host_errno_table[err];
735 return err;
736 }
737
738 static inline abi_long get_errno(abi_long ret)
739 {
740 if (ret == -1)
741 return -host_to_target_errno(errno);
742 else
743 return ret;
744 }
745
746 static inline int is_error(abi_long ret)
747 {
748 return (abi_ulong)ret >= (abi_ulong)(-4096);
749 }
750
751 char *target_strerror(int err)
752 {
753 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
754 return NULL;
755 }
756 return strerror(target_to_host_errno(err));
757 }
758
759 static abi_ulong target_brk;
760 static abi_ulong target_original_brk;
761 static abi_ulong brk_page;
762
763 void target_set_brk(abi_ulong new_brk)
764 {
765 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
766 brk_page = HOST_PAGE_ALIGN(target_brk);
767 }
768
769 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
770 #define DEBUGF_BRK(message, args...)
771
772 /* do_brk() must return target values and target errnos. */
773 abi_long do_brk(abi_ulong new_brk)
774 {
775 abi_long mapped_addr;
776 int new_alloc_size;
777
778 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
779
780 if (!new_brk) {
781 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
782 return target_brk;
783 }
784 if (new_brk < target_original_brk) {
785 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
786 target_brk);
787 return target_brk;
788 }
789
790 /* If the new brk is less than the highest page reserved to the
791 * target heap allocation, set it and we're almost done... */
792 if (new_brk <= brk_page) {
793 /* Heap contents are initialized to zero, as for anonymous
794 * mapped pages. */
795 if (new_brk > target_brk) {
796 memset(g2h(target_brk), 0, new_brk - target_brk);
797 }
798 target_brk = new_brk;
799 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
800 return target_brk;
801 }
802
803 /* We need to allocate more memory after the brk... Note that
804 * we don't use MAP_FIXED because that will map over the top of
805 * any existing mapping (like the one with the host libc or qemu
806 * itself); instead we treat "mapped but at wrong address" as
807 * a failure and unmap again.
808 */
809 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
810 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
811 PROT_READ|PROT_WRITE,
812 MAP_ANON|MAP_PRIVATE, 0, 0));
813
814 if (mapped_addr == brk_page) {
815 /* Heap contents are initialized to zero, as for anonymous
816 * mapped pages. Technically the new pages are already
817 * initialized to zero since they *are* anonymous mapped
818 * pages, however we have to take care with the contents that
819 * come from the remaining part of the previous page: it may
820 * contains garbage data due to a previous heap usage (grown
821 * then shrunken). */
822 memset(g2h(target_brk), 0, brk_page - target_brk);
823
824 target_brk = new_brk;
825 brk_page = HOST_PAGE_ALIGN(target_brk);
826 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
827 target_brk);
828 return target_brk;
829 } else if (mapped_addr != -1) {
830 /* Mapped but at wrong address, meaning there wasn't actually
831 * enough space for this brk.
832 */
833 target_munmap(mapped_addr, new_alloc_size);
834 mapped_addr = -1;
835 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
836 }
837 else {
838 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
839 }
840
841 #if defined(TARGET_ALPHA)
842 /* We (partially) emulate OSF/1 on Alpha, which requires we
843 return a proper errno, not an unchanged brk value. */
844 return -TARGET_ENOMEM;
845 #endif
846 /* For everything else, return the previous break. */
847 return target_brk;
848 }
849
850 static inline abi_long copy_from_user_fdset(fd_set *fds,
851 abi_ulong target_fds_addr,
852 int n)
853 {
854 int i, nw, j, k;
855 abi_ulong b, *target_fds;
856
857 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
858 if (!(target_fds = lock_user(VERIFY_READ,
859 target_fds_addr,
860 sizeof(abi_ulong) * nw,
861 1)))
862 return -TARGET_EFAULT;
863
864 FD_ZERO(fds);
865 k = 0;
866 for (i = 0; i < nw; i++) {
867 /* grab the abi_ulong */
868 __get_user(b, &target_fds[i]);
869 for (j = 0; j < TARGET_ABI_BITS; j++) {
870 /* check the bit inside the abi_ulong */
871 if ((b >> j) & 1)
872 FD_SET(k, fds);
873 k++;
874 }
875 }
876
877 unlock_user(target_fds, target_fds_addr, 0);
878
879 return 0;
880 }
881
882 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
883 abi_ulong target_fds_addr,
884 int n)
885 {
886 if (target_fds_addr) {
887 if (copy_from_user_fdset(fds, target_fds_addr, n))
888 return -TARGET_EFAULT;
889 *fds_ptr = fds;
890 } else {
891 *fds_ptr = NULL;
892 }
893 return 0;
894 }
895
896 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
897 const fd_set *fds,
898 int n)
899 {
900 int i, nw, j, k;
901 abi_long v;
902 abi_ulong *target_fds;
903
904 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
905 if (!(target_fds = lock_user(VERIFY_WRITE,
906 target_fds_addr,
907 sizeof(abi_ulong) * nw,
908 0)))
909 return -TARGET_EFAULT;
910
911 k = 0;
912 for (i = 0; i < nw; i++) {
913 v = 0;
914 for (j = 0; j < TARGET_ABI_BITS; j++) {
915 v |= ((FD_ISSET(k, fds) != 0) << j);
916 k++;
917 }
918 __put_user(v, &target_fds[i]);
919 }
920
921 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
922
923 return 0;
924 }
925
926 #if defined(__alpha__)
927 #define HOST_HZ 1024
928 #else
929 #define HOST_HZ 100
930 #endif
931
932 static inline abi_long host_to_target_clock_t(long ticks)
933 {
934 #if HOST_HZ == TARGET_HZ
935 return ticks;
936 #else
937 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
938 #endif
939 }
940
941 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
942 const struct rusage *rusage)
943 {
944 struct target_rusage *target_rusage;
945
946 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
947 return -TARGET_EFAULT;
948 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
949 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
950 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
951 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
952 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
953 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
954 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
955 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
956 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
957 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
958 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
959 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
960 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
961 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
962 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
963 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
964 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
965 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
966 unlock_user_struct(target_rusage, target_addr, 1);
967
968 return 0;
969 }
970
971 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
972 {
973 abi_ulong target_rlim_swap;
974 rlim_t result;
975
976 target_rlim_swap = tswapal(target_rlim);
977 if (target_rlim_swap == TARGET_RLIM_INFINITY)
978 return RLIM_INFINITY;
979
980 result = target_rlim_swap;
981 if (target_rlim_swap != (rlim_t)result)
982 return RLIM_INFINITY;
983
984 return result;
985 }
986
987 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
988 {
989 abi_ulong target_rlim_swap;
990 abi_ulong result;
991
992 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
993 target_rlim_swap = TARGET_RLIM_INFINITY;
994 else
995 target_rlim_swap = rlim;
996 result = tswapal(target_rlim_swap);
997
998 return result;
999 }
1000
1001 static inline int target_to_host_resource(int code)
1002 {
1003 switch (code) {
1004 case TARGET_RLIMIT_AS:
1005 return RLIMIT_AS;
1006 case TARGET_RLIMIT_CORE:
1007 return RLIMIT_CORE;
1008 case TARGET_RLIMIT_CPU:
1009 return RLIMIT_CPU;
1010 case TARGET_RLIMIT_DATA:
1011 return RLIMIT_DATA;
1012 case TARGET_RLIMIT_FSIZE:
1013 return RLIMIT_FSIZE;
1014 case TARGET_RLIMIT_LOCKS:
1015 return RLIMIT_LOCKS;
1016 case TARGET_RLIMIT_MEMLOCK:
1017 return RLIMIT_MEMLOCK;
1018 case TARGET_RLIMIT_MSGQUEUE:
1019 return RLIMIT_MSGQUEUE;
1020 case TARGET_RLIMIT_NICE:
1021 return RLIMIT_NICE;
1022 case TARGET_RLIMIT_NOFILE:
1023 return RLIMIT_NOFILE;
1024 case TARGET_RLIMIT_NPROC:
1025 return RLIMIT_NPROC;
1026 case TARGET_RLIMIT_RSS:
1027 return RLIMIT_RSS;
1028 case TARGET_RLIMIT_RTPRIO:
1029 return RLIMIT_RTPRIO;
1030 case TARGET_RLIMIT_SIGPENDING:
1031 return RLIMIT_SIGPENDING;
1032 case TARGET_RLIMIT_STACK:
1033 return RLIMIT_STACK;
1034 default:
1035 return code;
1036 }
1037 }
1038
1039 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1040 abi_ulong target_tv_addr)
1041 {
1042 struct target_timeval *target_tv;
1043
1044 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1045 return -TARGET_EFAULT;
1046
1047 __get_user(tv->tv_sec, &target_tv->tv_sec);
1048 __get_user(tv->tv_usec, &target_tv->tv_usec);
1049
1050 unlock_user_struct(target_tv, target_tv_addr, 0);
1051
1052 return 0;
1053 }
1054
1055 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1056 const struct timeval *tv)
1057 {
1058 struct target_timeval *target_tv;
1059
1060 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1061 return -TARGET_EFAULT;
1062
1063 __put_user(tv->tv_sec, &target_tv->tv_sec);
1064 __put_user(tv->tv_usec, &target_tv->tv_usec);
1065
1066 unlock_user_struct(target_tv, target_tv_addr, 1);
1067
1068 return 0;
1069 }
1070
1071 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1072 #include <mqueue.h>
1073
1074 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1075 abi_ulong target_mq_attr_addr)
1076 {
1077 struct target_mq_attr *target_mq_attr;
1078
1079 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1080 target_mq_attr_addr, 1))
1081 return -TARGET_EFAULT;
1082
1083 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1084 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1085 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1086 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1087
1088 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1089
1090 return 0;
1091 }
1092
1093 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1094 const struct mq_attr *attr)
1095 {
1096 struct target_mq_attr *target_mq_attr;
1097
1098 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1099 target_mq_attr_addr, 0))
1100 return -TARGET_EFAULT;
1101
1102 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1103 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1104 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1105 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1106
1107 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1108
1109 return 0;
1110 }
1111 #endif
1112
1113 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1114 /* do_select() must return target values and target errnos. */
1115 static abi_long do_select(int n,
1116 abi_ulong rfd_addr, abi_ulong wfd_addr,
1117 abi_ulong efd_addr, abi_ulong target_tv_addr)
1118 {
1119 fd_set rfds, wfds, efds;
1120 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1121 struct timeval tv, *tv_ptr;
1122 abi_long ret;
1123
1124 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1125 if (ret) {
1126 return ret;
1127 }
1128 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1129 if (ret) {
1130 return ret;
1131 }
1132 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1133 if (ret) {
1134 return ret;
1135 }
1136
1137 if (target_tv_addr) {
1138 if (copy_from_user_timeval(&tv, target_tv_addr))
1139 return -TARGET_EFAULT;
1140 tv_ptr = &tv;
1141 } else {
1142 tv_ptr = NULL;
1143 }
1144
1145 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1146
1147 if (!is_error(ret)) {
1148 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1149 return -TARGET_EFAULT;
1150 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1151 return -TARGET_EFAULT;
1152 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1153 return -TARGET_EFAULT;
1154
1155 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1156 return -TARGET_EFAULT;
1157 }
1158
1159 return ret;
1160 }
1161 #endif
1162
1163 static abi_long do_pipe2(int host_pipe[], int flags)
1164 {
1165 #ifdef CONFIG_PIPE2
1166 return pipe2(host_pipe, flags);
1167 #else
1168 return -ENOSYS;
1169 #endif
1170 }
1171
1172 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1173 int flags, int is_pipe2)
1174 {
1175 int host_pipe[2];
1176 abi_long ret;
1177 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1178
1179 if (is_error(ret))
1180 return get_errno(ret);
1181
1182 /* Several targets have special calling conventions for the original
1183 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1184 if (!is_pipe2) {
1185 #if defined(TARGET_ALPHA)
1186 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1187 return host_pipe[0];
1188 #elif defined(TARGET_MIPS)
1189 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1190 return host_pipe[0];
1191 #elif defined(TARGET_SH4)
1192 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1193 return host_pipe[0];
1194 #endif
1195 }
1196
1197 if (put_user_s32(host_pipe[0], pipedes)
1198 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1199 return -TARGET_EFAULT;
1200 return get_errno(ret);
1201 }
1202
1203 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1204 abi_ulong target_addr,
1205 socklen_t len)
1206 {
1207 struct target_ip_mreqn *target_smreqn;
1208
1209 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1210 if (!target_smreqn)
1211 return -TARGET_EFAULT;
1212 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1213 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1214 if (len == sizeof(struct target_ip_mreqn))
1215 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1216 unlock_user(target_smreqn, target_addr, 0);
1217
1218 return 0;
1219 }
1220
1221 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1222 abi_ulong target_addr,
1223 socklen_t len)
1224 {
1225 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1226 sa_family_t sa_family;
1227 struct target_sockaddr *target_saddr;
1228
1229 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1230 if (!target_saddr)
1231 return -TARGET_EFAULT;
1232
1233 sa_family = tswap16(target_saddr->sa_family);
1234
1235 /* Oops. The caller might send a incomplete sun_path; sun_path
1236 * must be terminated by \0 (see the manual page), but
1237 * unfortunately it is quite common to specify sockaddr_un
1238 * length as "strlen(x->sun_path)" while it should be
1239 * "strlen(...) + 1". We'll fix that here if needed.
1240 * Linux kernel has a similar feature.
1241 */
1242
1243 if (sa_family == AF_UNIX) {
1244 if (len < unix_maxlen && len > 0) {
1245 char *cp = (char*)target_saddr;
1246
1247 if ( cp[len-1] && !cp[len] )
1248 len++;
1249 }
1250 if (len > unix_maxlen)
1251 len = unix_maxlen;
1252 }
1253
1254 memcpy(addr, target_saddr, len);
1255 addr->sa_family = sa_family;
1256 unlock_user(target_saddr, target_addr, 0);
1257
1258 return 0;
1259 }
1260
1261 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1262 struct sockaddr *addr,
1263 socklen_t len)
1264 {
1265 struct target_sockaddr *target_saddr;
1266
1267 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1268 if (!target_saddr)
1269 return -TARGET_EFAULT;
1270 memcpy(target_saddr, addr, len);
1271 target_saddr->sa_family = tswap16(addr->sa_family);
1272 unlock_user(target_saddr, target_addr, len);
1273
1274 return 0;
1275 }
1276
1277 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1278 struct target_msghdr *target_msgh)
1279 {
1280 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1281 abi_long msg_controllen;
1282 abi_ulong target_cmsg_addr;
1283 struct target_cmsghdr *target_cmsg;
1284 socklen_t space = 0;
1285
1286 msg_controllen = tswapal(target_msgh->msg_controllen);
1287 if (msg_controllen < sizeof (struct target_cmsghdr))
1288 goto the_end;
1289 target_cmsg_addr = tswapal(target_msgh->msg_control);
1290 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1291 if (!target_cmsg)
1292 return -TARGET_EFAULT;
1293
1294 while (cmsg && target_cmsg) {
1295 void *data = CMSG_DATA(cmsg);
1296 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1297
1298 int len = tswapal(target_cmsg->cmsg_len)
1299 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1300
1301 space += CMSG_SPACE(len);
1302 if (space > msgh->msg_controllen) {
1303 space -= CMSG_SPACE(len);
1304 gemu_log("Host cmsg overflow\n");
1305 break;
1306 }
1307
1308 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1309 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1310 cmsg->cmsg_len = CMSG_LEN(len);
1311
1312 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1313 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1314 memcpy(data, target_data, len);
1315 } else {
1316 int *fd = (int *)data;
1317 int *target_fd = (int *)target_data;
1318 int i, numfds = len / sizeof(int);
1319
1320 for (i = 0; i < numfds; i++)
1321 fd[i] = tswap32(target_fd[i]);
1322 }
1323
1324 cmsg = CMSG_NXTHDR(msgh, cmsg);
1325 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1326 }
1327 unlock_user(target_cmsg, target_cmsg_addr, 0);
1328 the_end:
1329 msgh->msg_controllen = space;
1330 return 0;
1331 }
1332
1333 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1334 struct msghdr *msgh)
1335 {
1336 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1337 abi_long msg_controllen;
1338 abi_ulong target_cmsg_addr;
1339 struct target_cmsghdr *target_cmsg;
1340 socklen_t space = 0;
1341
1342 msg_controllen = tswapal(target_msgh->msg_controllen);
1343 if (msg_controllen < sizeof (struct target_cmsghdr))
1344 goto the_end;
1345 target_cmsg_addr = tswapal(target_msgh->msg_control);
1346 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1347 if (!target_cmsg)
1348 return -TARGET_EFAULT;
1349
1350 while (cmsg && target_cmsg) {
1351 void *data = CMSG_DATA(cmsg);
1352 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1353
1354 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1355
1356 space += TARGET_CMSG_SPACE(len);
1357 if (space > msg_controllen) {
1358 space -= TARGET_CMSG_SPACE(len);
1359 gemu_log("Target cmsg overflow\n");
1360 break;
1361 }
1362
1363 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1364 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1365 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1366
1367 if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1368 (cmsg->cmsg_type == SCM_RIGHTS)) {
1369 int *fd = (int *)data;
1370 int *target_fd = (int *)target_data;
1371 int i, numfds = len / sizeof(int);
1372
1373 for (i = 0; i < numfds; i++)
1374 target_fd[i] = tswap32(fd[i]);
1375 } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1376 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1377 (len == sizeof(struct timeval))) {
1378 /* copy struct timeval to target */
1379 struct timeval *tv = (struct timeval *)data;
1380 struct target_timeval *target_tv =
1381 (struct target_timeval *)target_data;
1382
1383 target_tv->tv_sec = tswapal(tv->tv_sec);
1384 target_tv->tv_usec = tswapal(tv->tv_usec);
1385 } else {
1386 gemu_log("Unsupported ancillary data: %d/%d\n",
1387 cmsg->cmsg_level, cmsg->cmsg_type);
1388 memcpy(target_data, data, len);
1389 }
1390
1391 cmsg = CMSG_NXTHDR(msgh, cmsg);
1392 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1393 }
1394 unlock_user(target_cmsg, target_cmsg_addr, space);
1395 the_end:
1396 target_msgh->msg_controllen = tswapal(space);
1397 return 0;
1398 }
1399
1400 /* do_setsockopt() Must return target values and target errnos. */
1401 static abi_long do_setsockopt(int sockfd, int level, int optname,
1402 abi_ulong optval_addr, socklen_t optlen)
1403 {
1404 abi_long ret;
1405 int val;
1406 struct ip_mreqn *ip_mreq;
1407 struct ip_mreq_source *ip_mreq_source;
1408
1409 switch(level) {
1410 case SOL_TCP:
1411 /* TCP options all take an 'int' value. */
1412 if (optlen < sizeof(uint32_t))
1413 return -TARGET_EINVAL;
1414
1415 if (get_user_u32(val, optval_addr))
1416 return -TARGET_EFAULT;
1417 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1418 break;
1419 case SOL_IP:
1420 switch(optname) {
1421 case IP_TOS:
1422 case IP_TTL:
1423 case IP_HDRINCL:
1424 case IP_ROUTER_ALERT:
1425 case IP_RECVOPTS:
1426 case IP_RETOPTS:
1427 case IP_PKTINFO:
1428 case IP_MTU_DISCOVER:
1429 case IP_RECVERR:
1430 case IP_RECVTOS:
1431 #ifdef IP_FREEBIND
1432 case IP_FREEBIND:
1433 #endif
1434 case IP_MULTICAST_TTL:
1435 case IP_MULTICAST_LOOP:
1436 val = 0;
1437 if (optlen >= sizeof(uint32_t)) {
1438 if (get_user_u32(val, optval_addr))
1439 return -TARGET_EFAULT;
1440 } else if (optlen >= 1) {
1441 if (get_user_u8(val, optval_addr))
1442 return -TARGET_EFAULT;
1443 }
1444 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1445 break;
1446 case IP_ADD_MEMBERSHIP:
1447 case IP_DROP_MEMBERSHIP:
1448 if (optlen < sizeof (struct target_ip_mreq) ||
1449 optlen > sizeof (struct target_ip_mreqn))
1450 return -TARGET_EINVAL;
1451
1452 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1453 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1454 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1455 break;
1456
1457 case IP_BLOCK_SOURCE:
1458 case IP_UNBLOCK_SOURCE:
1459 case IP_ADD_SOURCE_MEMBERSHIP:
1460 case IP_DROP_SOURCE_MEMBERSHIP:
1461 if (optlen != sizeof (struct target_ip_mreq_source))
1462 return -TARGET_EINVAL;
1463
1464 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1465 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1466 unlock_user (ip_mreq_source, optval_addr, 0);
1467 break;
1468
1469 default:
1470 goto unimplemented;
1471 }
1472 break;
1473 case SOL_RAW:
1474 switch (optname) {
1475 case ICMP_FILTER:
1476 /* struct icmp_filter takes an u32 value */
1477 if (optlen < sizeof(uint32_t)) {
1478 return -TARGET_EINVAL;
1479 }
1480
1481 if (get_user_u32(val, optval_addr)) {
1482 return -TARGET_EFAULT;
1483 }
1484 ret = get_errno(setsockopt(sockfd, level, optname,
1485 &val, sizeof(val)));
1486 break;
1487
1488 default:
1489 goto unimplemented;
1490 }
1491 break;
1492 case TARGET_SOL_SOCKET:
1493 switch (optname) {
1494 /* Options with 'int' argument. */
1495 case TARGET_SO_DEBUG:
1496 optname = SO_DEBUG;
1497 break;
1498 case TARGET_SO_REUSEADDR:
1499 optname = SO_REUSEADDR;
1500 break;
1501 case TARGET_SO_TYPE:
1502 optname = SO_TYPE;
1503 break;
1504 case TARGET_SO_ERROR:
1505 optname = SO_ERROR;
1506 break;
1507 case TARGET_SO_DONTROUTE:
1508 optname = SO_DONTROUTE;
1509 break;
1510 case TARGET_SO_BROADCAST:
1511 optname = SO_BROADCAST;
1512 break;
1513 case TARGET_SO_SNDBUF:
1514 optname = SO_SNDBUF;
1515 break;
1516 case TARGET_SO_RCVBUF:
1517 optname = SO_RCVBUF;
1518 break;
1519 case TARGET_SO_KEEPALIVE:
1520 optname = SO_KEEPALIVE;
1521 break;
1522 case TARGET_SO_OOBINLINE:
1523 optname = SO_OOBINLINE;
1524 break;
1525 case TARGET_SO_NO_CHECK:
1526 optname = SO_NO_CHECK;
1527 break;
1528 case TARGET_SO_PRIORITY:
1529 optname = SO_PRIORITY;
1530 break;
1531 #ifdef SO_BSDCOMPAT
1532 case TARGET_SO_BSDCOMPAT:
1533 optname = SO_BSDCOMPAT;
1534 break;
1535 #endif
1536 case TARGET_SO_PASSCRED:
1537 optname = SO_PASSCRED;
1538 break;
1539 case TARGET_SO_TIMESTAMP:
1540 optname = SO_TIMESTAMP;
1541 break;
1542 case TARGET_SO_RCVLOWAT:
1543 optname = SO_RCVLOWAT;
1544 break;
1545 case TARGET_SO_RCVTIMEO:
1546 optname = SO_RCVTIMEO;
1547 break;
1548 case TARGET_SO_SNDTIMEO:
1549 optname = SO_SNDTIMEO;
1550 break;
1551 break;
1552 default:
1553 goto unimplemented;
1554 }
1555 if (optlen < sizeof(uint32_t))
1556 return -TARGET_EINVAL;
1557
1558 if (get_user_u32(val, optval_addr))
1559 return -TARGET_EFAULT;
1560 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1561 break;
1562 default:
1563 unimplemented:
1564 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1565 ret = -TARGET_ENOPROTOOPT;
1566 }
1567 return ret;
1568 }
1569
1570 /* do_getsockopt() Must return target values and target errnos. */
1571 static abi_long do_getsockopt(int sockfd, int level, int optname,
1572 abi_ulong optval_addr, abi_ulong optlen)
1573 {
1574 abi_long ret;
1575 int len, val;
1576 socklen_t lv;
1577
1578 switch(level) {
1579 case TARGET_SOL_SOCKET:
1580 level = SOL_SOCKET;
1581 switch (optname) {
1582 /* These don't just return a single integer */
1583 case TARGET_SO_LINGER:
1584 case TARGET_SO_RCVTIMEO:
1585 case TARGET_SO_SNDTIMEO:
1586 case TARGET_SO_PEERNAME:
1587 goto unimplemented;
1588 case TARGET_SO_PEERCRED: {
1589 struct ucred cr;
1590 socklen_t crlen;
1591 struct target_ucred *tcr;
1592
1593 if (get_user_u32(len, optlen)) {
1594 return -TARGET_EFAULT;
1595 }
1596 if (len < 0) {
1597 return -TARGET_EINVAL;
1598 }
1599
1600 crlen = sizeof(cr);
1601 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1602 &cr, &crlen));
1603 if (ret < 0) {
1604 return ret;
1605 }
1606 if (len > crlen) {
1607 len = crlen;
1608 }
1609 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1610 return -TARGET_EFAULT;
1611 }
1612 __put_user(cr.pid, &tcr->pid);
1613 __put_user(cr.uid, &tcr->uid);
1614 __put_user(cr.gid, &tcr->gid);
1615 unlock_user_struct(tcr, optval_addr, 1);
1616 if (put_user_u32(len, optlen)) {
1617 return -TARGET_EFAULT;
1618 }
1619 break;
1620 }
1621 /* Options with 'int' argument. */
1622 case TARGET_SO_DEBUG:
1623 optname = SO_DEBUG;
1624 goto int_case;
1625 case TARGET_SO_REUSEADDR:
1626 optname = SO_REUSEADDR;
1627 goto int_case;
1628 case TARGET_SO_TYPE:
1629 optname = SO_TYPE;
1630 goto int_case;
1631 case TARGET_SO_ERROR:
1632 optname = SO_ERROR;
1633 goto int_case;
1634 case TARGET_SO_DONTROUTE:
1635 optname = SO_DONTROUTE;
1636 goto int_case;
1637 case TARGET_SO_BROADCAST:
1638 optname = SO_BROADCAST;
1639 goto int_case;
1640 case TARGET_SO_SNDBUF:
1641 optname = SO_SNDBUF;
1642 goto int_case;
1643 case TARGET_SO_RCVBUF:
1644 optname = SO_RCVBUF;
1645 goto int_case;
1646 case TARGET_SO_KEEPALIVE:
1647 optname = SO_KEEPALIVE;
1648 goto int_case;
1649 case TARGET_SO_OOBINLINE:
1650 optname = SO_OOBINLINE;
1651 goto int_case;
1652 case TARGET_SO_NO_CHECK:
1653 optname = SO_NO_CHECK;
1654 goto int_case;
1655 case TARGET_SO_PRIORITY:
1656 optname = SO_PRIORITY;
1657 goto int_case;
1658 #ifdef SO_BSDCOMPAT
1659 case TARGET_SO_BSDCOMPAT:
1660 optname = SO_BSDCOMPAT;
1661 goto int_case;
1662 #endif
1663 case TARGET_SO_PASSCRED:
1664 optname = SO_PASSCRED;
1665 goto int_case;
1666 case TARGET_SO_TIMESTAMP:
1667 optname = SO_TIMESTAMP;
1668 goto int_case;
1669 case TARGET_SO_RCVLOWAT:
1670 optname = SO_RCVLOWAT;
1671 goto int_case;
1672 default:
1673 goto int_case;
1674 }
1675 break;
1676 case SOL_TCP:
1677 /* TCP options all take an 'int' value. */
1678 int_case:
1679 if (get_user_u32(len, optlen))
1680 return -TARGET_EFAULT;
1681 if (len < 0)
1682 return -TARGET_EINVAL;
1683 lv = sizeof(lv);
1684 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1685 if (ret < 0)
1686 return ret;
1687 if (len > lv)
1688 len = lv;
1689 if (len == 4) {
1690 if (put_user_u32(val, optval_addr))
1691 return -TARGET_EFAULT;
1692 } else {
1693 if (put_user_u8(val, optval_addr))
1694 return -TARGET_EFAULT;
1695 }
1696 if (put_user_u32(len, optlen))
1697 return -TARGET_EFAULT;
1698 break;
1699 case SOL_IP:
1700 switch(optname) {
1701 case IP_TOS:
1702 case IP_TTL:
1703 case IP_HDRINCL:
1704 case IP_ROUTER_ALERT:
1705 case IP_RECVOPTS:
1706 case IP_RETOPTS:
1707 case IP_PKTINFO:
1708 case IP_MTU_DISCOVER:
1709 case IP_RECVERR:
1710 case IP_RECVTOS:
1711 #ifdef IP_FREEBIND
1712 case IP_FREEBIND:
1713 #endif
1714 case IP_MULTICAST_TTL:
1715 case IP_MULTICAST_LOOP:
1716 if (get_user_u32(len, optlen))
1717 return -TARGET_EFAULT;
1718 if (len < 0)
1719 return -TARGET_EINVAL;
1720 lv = sizeof(lv);
1721 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1722 if (ret < 0)
1723 return ret;
1724 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1725 len = 1;
1726 if (put_user_u32(len, optlen)
1727 || put_user_u8(val, optval_addr))
1728 return -TARGET_EFAULT;
1729 } else {
1730 if (len > sizeof(int))
1731 len = sizeof(int);
1732 if (put_user_u32(len, optlen)
1733 || put_user_u32(val, optval_addr))
1734 return -TARGET_EFAULT;
1735 }
1736 break;
1737 default:
1738 ret = -TARGET_ENOPROTOOPT;
1739 break;
1740 }
1741 break;
1742 default:
1743 unimplemented:
1744 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1745 level, optname);
1746 ret = -TARGET_EOPNOTSUPP;
1747 break;
1748 }
1749 return ret;
1750 }
1751
1752 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1753 int count, int copy)
1754 {
1755 struct target_iovec *target_vec;
1756 struct iovec *vec;
1757 abi_ulong total_len, max_len;
1758 int i;
1759
1760 if (count == 0) {
1761 errno = 0;
1762 return NULL;
1763 }
1764 if (count > IOV_MAX) {
1765 errno = EINVAL;
1766 return NULL;
1767 }
1768
1769 vec = calloc(count, sizeof(struct iovec));
1770 if (vec == NULL) {
1771 errno = ENOMEM;
1772 return NULL;
1773 }
1774
1775 target_vec = lock_user(VERIFY_READ, target_addr,
1776 count * sizeof(struct target_iovec), 1);
1777 if (target_vec == NULL) {
1778 errno = EFAULT;
1779 goto fail2;
1780 }
1781
1782 /* ??? If host page size > target page size, this will result in a
1783 value larger than what we can actually support. */
1784 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1785 total_len = 0;
1786
1787 for (i = 0; i < count; i++) {
1788 abi_ulong base = tswapal(target_vec[i].iov_base);
1789 abi_long len = tswapal(target_vec[i].iov_len);
1790
1791 if (len < 0) {
1792 errno = EINVAL;
1793 goto fail;
1794 } else if (len == 0) {
1795 /* Zero length pointer is ignored. */
1796 vec[i].iov_base = 0;
1797 } else {
1798 vec[i].iov_base = lock_user(type, base, len, copy);
1799 if (!vec[i].iov_base) {
1800 errno = EFAULT;
1801 goto fail;
1802 }
1803 if (len > max_len - total_len) {
1804 len = max_len - total_len;
1805 }
1806 }
1807 vec[i].iov_len = len;
1808 total_len += len;
1809 }
1810
1811 unlock_user(target_vec, target_addr, 0);
1812 return vec;
1813
1814 fail:
1815 free(vec);
1816 fail2:
1817 unlock_user(target_vec, target_addr, 0);
1818 return NULL;
1819 }
1820
1821 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1822 int count, int copy)
1823 {
1824 struct target_iovec *target_vec;
1825 int i;
1826
1827 target_vec = lock_user(VERIFY_READ, target_addr,
1828 count * sizeof(struct target_iovec), 1);
1829 if (target_vec) {
1830 for (i = 0; i < count; i++) {
1831 abi_ulong base = tswapal(target_vec[i].iov_base);
1832 abi_long len = tswapal(target_vec[i].iov_base);
1833 if (len < 0) {
1834 break;
1835 }
1836 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1837 }
1838 unlock_user(target_vec, target_addr, 0);
1839 }
1840
1841 free(vec);
1842 }
1843
1844 /* do_socket() Must return target values and target errnos. */
1845 static abi_long do_socket(int domain, int type, int protocol)
1846 {
1847 #if defined(TARGET_MIPS)
1848 switch(type) {
1849 case TARGET_SOCK_DGRAM:
1850 type = SOCK_DGRAM;
1851 break;
1852 case TARGET_SOCK_STREAM:
1853 type = SOCK_STREAM;
1854 break;
1855 case TARGET_SOCK_RAW:
1856 type = SOCK_RAW;
1857 break;
1858 case TARGET_SOCK_RDM:
1859 type = SOCK_RDM;
1860 break;
1861 case TARGET_SOCK_SEQPACKET:
1862 type = SOCK_SEQPACKET;
1863 break;
1864 case TARGET_SOCK_PACKET:
1865 type = SOCK_PACKET;
1866 break;
1867 }
1868 #endif
1869 if (domain == PF_NETLINK)
1870 return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
1871 return get_errno(socket(domain, type, protocol));
1872 }
1873
1874 /* do_bind() Must return target values and target errnos. */
1875 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1876 socklen_t addrlen)
1877 {
1878 void *addr;
1879 abi_long ret;
1880
1881 if ((int)addrlen < 0) {
1882 return -TARGET_EINVAL;
1883 }
1884
1885 addr = alloca(addrlen+1);
1886
1887 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1888 if (ret)
1889 return ret;
1890
1891 return get_errno(bind(sockfd, addr, addrlen));
1892 }
1893
1894 /* do_connect() Must return target values and target errnos. */
1895 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1896 socklen_t addrlen)
1897 {
1898 void *addr;
1899 abi_long ret;
1900
1901 if ((int)addrlen < 0) {
1902 return -TARGET_EINVAL;
1903 }
1904
1905 addr = alloca(addrlen);
1906
1907 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1908 if (ret)
1909 return ret;
1910
1911 return get_errno(connect(sockfd, addr, addrlen));
1912 }
1913
1914 /* do_sendrecvmsg() Must return target values and target errnos. */
1915 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1916 int flags, int send)
1917 {
1918 abi_long ret, len;
1919 struct target_msghdr *msgp;
1920 struct msghdr msg;
1921 int count;
1922 struct iovec *vec;
1923 abi_ulong target_vec;
1924
1925 /* FIXME */
1926 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1927 msgp,
1928 target_msg,
1929 send ? 1 : 0))
1930 return -TARGET_EFAULT;
1931 if (msgp->msg_name) {
1932 msg.msg_namelen = tswap32(msgp->msg_namelen);
1933 msg.msg_name = alloca(msg.msg_namelen);
1934 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1935 msg.msg_namelen);
1936 if (ret) {
1937 goto out2;
1938 }
1939 } else {
1940 msg.msg_name = NULL;
1941 msg.msg_namelen = 0;
1942 }
1943 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1944 msg.msg_control = alloca(msg.msg_controllen);
1945 msg.msg_flags = tswap32(msgp->msg_flags);
1946
1947 count = tswapal(msgp->msg_iovlen);
1948 target_vec = tswapal(msgp->msg_iov);
1949 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1950 target_vec, count, send);
1951 if (vec == NULL) {
1952 ret = -host_to_target_errno(errno);
1953 goto out2;
1954 }
1955 msg.msg_iovlen = count;
1956 msg.msg_iov = vec;
1957
1958 if (send) {
1959 ret = target_to_host_cmsg(&msg, msgp);
1960 if (ret == 0)
1961 ret = get_errno(sendmsg(fd, &msg, flags));
1962 } else {
1963 ret = get_errno(recvmsg(fd, &msg, flags));
1964 if (!is_error(ret)) {
1965 len = ret;
1966 ret = host_to_target_cmsg(msgp, &msg);
1967 if (!is_error(ret)) {
1968 msgp->msg_namelen = tswap32(msg.msg_namelen);
1969 if (msg.msg_name != NULL) {
1970 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1971 msg.msg_name, msg.msg_namelen);
1972 if (ret) {
1973 goto out;
1974 }
1975 }
1976
1977 ret = len;
1978 }
1979 }
1980 }
1981
1982 out:
1983 unlock_iovec(vec, target_vec, count, !send);
1984 out2:
1985 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1986 return ret;
1987 }
1988
1989 /* do_accept() Must return target values and target errnos. */
1990 static abi_long do_accept(int fd, abi_ulong target_addr,
1991 abi_ulong target_addrlen_addr)
1992 {
1993 socklen_t addrlen;
1994 void *addr;
1995 abi_long ret;
1996
1997 if (target_addr == 0)
1998 return get_errno(accept(fd, NULL, NULL));
1999
2000 /* linux returns EINVAL if addrlen pointer is invalid */
2001 if (get_user_u32(addrlen, target_addrlen_addr))
2002 return -TARGET_EINVAL;
2003
2004 if ((int)addrlen < 0) {
2005 return -TARGET_EINVAL;
2006 }
2007
2008 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2009 return -TARGET_EINVAL;
2010
2011 addr = alloca(addrlen);
2012
2013 ret = get_errno(accept(fd, addr, &addrlen));
2014 if (!is_error(ret)) {
2015 host_to_target_sockaddr(target_addr, addr, addrlen);
2016 if (put_user_u32(addrlen, target_addrlen_addr))
2017 ret = -TARGET_EFAULT;
2018 }
2019 return ret;
2020 }
2021
2022 /* do_getpeername() Must return target values and target errnos. */
2023 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2024 abi_ulong target_addrlen_addr)
2025 {
2026 socklen_t addrlen;
2027 void *addr;
2028 abi_long ret;
2029
2030 if (get_user_u32(addrlen, target_addrlen_addr))
2031 return -TARGET_EFAULT;
2032
2033 if ((int)addrlen < 0) {
2034 return -TARGET_EINVAL;
2035 }
2036
2037 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2038 return -TARGET_EFAULT;
2039
2040 addr = alloca(addrlen);
2041
2042 ret = get_errno(getpeername(fd, addr, &addrlen));
2043 if (!is_error(ret)) {
2044 host_to_target_sockaddr(target_addr, addr, addrlen);
2045 if (put_user_u32(addrlen, target_addrlen_addr))
2046 ret = -TARGET_EFAULT;
2047 }
2048 return ret;
2049 }
2050
2051 /* do_getsockname() Must return target values and target errnos. */
2052 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2053 abi_ulong target_addrlen_addr)
2054 {
2055 socklen_t addrlen;
2056 void *addr;
2057 abi_long ret;
2058
2059 if (get_user_u32(addrlen, target_addrlen_addr))
2060 return -TARGET_EFAULT;
2061
2062 if ((int)addrlen < 0) {
2063 return -TARGET_EINVAL;
2064 }
2065
2066 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2067 return -TARGET_EFAULT;
2068
2069 addr = alloca(addrlen);
2070
2071 ret = get_errno(getsockname(fd, addr, &addrlen));
2072 if (!is_error(ret)) {
2073 host_to_target_sockaddr(target_addr, addr, addrlen);
2074 if (put_user_u32(addrlen, target_addrlen_addr))
2075 ret = -TARGET_EFAULT;
2076 }
2077 return ret;
2078 }
2079
2080 /* do_socketpair() Must return target values and target errnos. */
2081 static abi_long do_socketpair(int domain, int type, int protocol,
2082 abi_ulong target_tab_addr)
2083 {
2084 int tab[2];
2085 abi_long ret;
2086
2087 ret = get_errno(socketpair(domain, type, protocol, tab));
2088 if (!is_error(ret)) {
2089 if (put_user_s32(tab[0], target_tab_addr)
2090 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2091 ret = -TARGET_EFAULT;
2092 }
2093 return ret;
2094 }
2095
2096 /* do_sendto() Must return target values and target errnos. */
2097 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2098 abi_ulong target_addr, socklen_t addrlen)
2099 {
2100 void *addr;
2101 void *host_msg;
2102 abi_long ret;
2103
2104 if ((int)addrlen < 0) {
2105 return -TARGET_EINVAL;
2106 }
2107
2108 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2109 if (!host_msg)
2110 return -TARGET_EFAULT;
2111 if (target_addr) {
2112 addr = alloca(addrlen);
2113 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2114 if (ret) {
2115 unlock_user(host_msg, msg, 0);
2116 return ret;
2117 }
2118 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2119 } else {
2120 ret = get_errno(send(fd, host_msg, len, flags));
2121 }
2122 unlock_user(host_msg, msg, 0);
2123 return ret;
2124 }
2125
2126 /* do_recvfrom() Must return target values and target errnos. */
2127 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2128 abi_ulong target_addr,
2129 abi_ulong target_addrlen)
2130 {
2131 socklen_t addrlen;
2132 void *addr;
2133 void *host_msg;
2134 abi_long ret;
2135
2136 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2137 if (!host_msg)
2138 return -TARGET_EFAULT;
2139 if (target_addr) {
2140 if (get_user_u32(addrlen, target_addrlen)) {
2141 ret = -TARGET_EFAULT;
2142 goto fail;
2143 }
2144 if ((int)addrlen < 0) {
2145 ret = -TARGET_EINVAL;
2146 goto fail;
2147 }
2148 addr = alloca(addrlen);
2149 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2150 } else {
2151 addr = NULL; /* To keep compiler quiet. */
2152 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2153 }
2154 if (!is_error(ret)) {
2155 if (target_addr) {
2156 host_to_target_sockaddr(target_addr, addr, addrlen);
2157 if (put_user_u32(addrlen, target_addrlen)) {
2158 ret = -TARGET_EFAULT;
2159 goto fail;
2160 }
2161 }
2162 unlock_user(host_msg, msg, len);
2163 } else {
2164 fail:
2165 unlock_user(host_msg, msg, 0);
2166 }
2167 return ret;
2168 }
2169
2170 #ifdef TARGET_NR_socketcall
2171 /* do_socketcall() Must return target values and target errnos. */
2172 static abi_long do_socketcall(int num, abi_ulong vptr)
2173 {
2174 abi_long ret;
2175 const int n = sizeof(abi_ulong);
2176
2177 switch(num) {
2178 case SOCKOP_socket:
2179 {
2180 abi_ulong domain, type, protocol;
2181
2182 if (get_user_ual(domain, vptr)
2183 || get_user_ual(type, vptr + n)
2184 || get_user_ual(protocol, vptr + 2 * n))
2185 return -TARGET_EFAULT;
2186
2187 ret = do_socket(domain, type, protocol);
2188 }
2189 break;
2190 case SOCKOP_bind:
2191 {
2192 abi_ulong sockfd;
2193 abi_ulong target_addr;
2194 socklen_t addrlen;
2195
2196 if (get_user_ual(sockfd, vptr)
2197 || get_user_ual(target_addr, vptr + n)
2198 || get_user_ual(addrlen, vptr + 2 * n))
2199 return -TARGET_EFAULT;
2200
2201 ret = do_bind(sockfd, target_addr, addrlen);
2202 }
2203 break;
2204 case SOCKOP_connect:
2205 {
2206 abi_ulong sockfd;
2207 abi_ulong target_addr;
2208 socklen_t addrlen;
2209
2210 if (get_user_ual(sockfd, vptr)
2211 || get_user_ual(target_addr, vptr + n)
2212 || get_user_ual(addrlen, vptr + 2 * n))
2213 return -TARGET_EFAULT;
2214
2215 ret = do_connect(sockfd, target_addr, addrlen);
2216 }
2217 break;
2218 case SOCKOP_listen:
2219 {
2220 abi_ulong sockfd, backlog;
2221
2222 if (get_user_ual(sockfd, vptr)
2223 || get_user_ual(backlog, vptr + n))
2224 return -TARGET_EFAULT;
2225
2226 ret = get_errno(listen(sockfd, backlog));
2227 }
2228 break;
2229 case SOCKOP_accept:
2230 {
2231 abi_ulong sockfd;
2232 abi_ulong target_addr, target_addrlen;
2233
2234 if (get_user_ual(sockfd, vptr)
2235 || get_user_ual(target_addr, vptr + n)
2236 || get_user_ual(target_addrlen, vptr + 2 * n))
2237 return -TARGET_EFAULT;
2238
2239 ret = do_accept(sockfd, target_addr, target_addrlen);
2240 }
2241 break;
2242 case SOCKOP_getsockname:
2243 {
2244 abi_ulong sockfd;
2245 abi_ulong target_addr, target_addrlen;
2246
2247 if (get_user_ual(sockfd, vptr)
2248 || get_user_ual(target_addr, vptr + n)
2249 || get_user_ual(target_addrlen, vptr + 2 * n))
2250 return -TARGET_EFAULT;
2251
2252 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2253 }
2254 break;
2255 case SOCKOP_getpeername:
2256 {
2257 abi_ulong sockfd;
2258 abi_ulong target_addr, target_addrlen;
2259
2260 if (get_user_ual(sockfd, vptr)
2261 || get_user_ual(target_addr, vptr + n)
2262 || get_user_ual(target_addrlen, vptr + 2 * n))
2263 return -TARGET_EFAULT;
2264
2265 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2266 }
2267 break;
2268 case SOCKOP_socketpair:
2269 {
2270 abi_ulong domain, type, protocol;
2271 abi_ulong tab;
2272
2273 if (get_user_ual(domain, vptr)
2274 || get_user_ual(type, vptr + n)
2275 || get_user_ual(protocol, vptr + 2 * n)
2276 || get_user_ual(tab, vptr + 3 * n))
2277 return -TARGET_EFAULT;
2278
2279 ret = do_socketpair(domain, type, protocol, tab);
2280 }
2281 break;
2282 case SOCKOP_send:
2283 {
2284 abi_ulong sockfd;
2285 abi_ulong msg;
2286 size_t len;
2287 abi_ulong flags;
2288
2289 if (get_user_ual(sockfd, vptr)
2290 || get_user_ual(msg, vptr + n)
2291 || get_user_ual(len, vptr + 2 * n)
2292 || get_user_ual(flags, vptr + 3 * n))
2293 return -TARGET_EFAULT;
2294
2295 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2296 }
2297 break;
2298 case SOCKOP_recv:
2299 {
2300 abi_ulong sockfd;
2301 abi_ulong msg;
2302 size_t len;
2303 abi_ulong flags;
2304
2305 if (get_user_ual(sockfd, vptr)
2306 || get_user_ual(msg, vptr + n)
2307 || get_user_ual(len, vptr + 2 * n)
2308 || get_user_ual(flags, vptr + 3 * n))
2309 return -TARGET_EFAULT;
2310
2311 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2312 }
2313 break;
2314 case SOCKOP_sendto:
2315 {
2316 abi_ulong sockfd;
2317 abi_ulong msg;
2318 size_t len;
2319 abi_ulong flags;
2320 abi_ulong addr;
2321 socklen_t addrlen;
2322
2323 if (get_user_ual(sockfd, vptr)
2324 || get_user_ual(msg, vptr + n)
2325 || get_user_ual(len, vptr + 2 * n)
2326 || get_user_ual(flags, vptr + 3 * n)
2327 || get_user_ual(addr, vptr + 4 * n)
2328 || get_user_ual(addrlen, vptr + 5 * n))
2329 return -TARGET_EFAULT;
2330
2331 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2332 }
2333 break;
2334 case SOCKOP_recvfrom:
2335 {
2336 abi_ulong sockfd;
2337 abi_ulong msg;
2338 size_t len;
2339 abi_ulong flags;
2340 abi_ulong addr;
2341 socklen_t addrlen;
2342
2343 if (get_user_ual(sockfd, vptr)
2344 || get_user_ual(msg, vptr + n)
2345 || get_user_ual(len, vptr + 2 * n)
2346 || get_user_ual(flags, vptr + 3 * n)
2347 || get_user_ual(addr, vptr + 4 * n)
2348 || get_user_ual(addrlen, vptr + 5 * n))
2349 return -TARGET_EFAULT;
2350
2351 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2352 }
2353 break;
2354 case SOCKOP_shutdown:
2355 {
2356 abi_ulong sockfd, how;
2357
2358 if (get_user_ual(sockfd, vptr)
2359 || get_user_ual(how, vptr + n))
2360 return -TARGET_EFAULT;
2361
2362 ret = get_errno(shutdown(sockfd, how));
2363 }
2364 break;
2365 case SOCKOP_sendmsg:
2366 case SOCKOP_recvmsg:
2367 {
2368 abi_ulong fd;
2369 abi_ulong target_msg;
2370 abi_ulong flags;
2371
2372 if (get_user_ual(fd, vptr)
2373 || get_user_ual(target_msg, vptr + n)
2374 || get_user_ual(flags, vptr + 2 * n))
2375 return -TARGET_EFAULT;
2376
2377 ret = do_sendrecvmsg(fd, target_msg, flags,
2378 (num == SOCKOP_sendmsg));
2379 }
2380 break;
2381 case SOCKOP_setsockopt:
2382 {
2383 abi_ulong sockfd;
2384 abi_ulong level;
2385 abi_ulong optname;
2386 abi_ulong optval;
2387 socklen_t optlen;
2388
2389 if (get_user_ual(sockfd, vptr)
2390 || get_user_ual(level, vptr + n)
2391 || get_user_ual(optname, vptr + 2 * n)
2392 || get_user_ual(optval, vptr + 3 * n)
2393 || get_user_ual(optlen, vptr + 4 * n))
2394 return -TARGET_EFAULT;
2395
2396 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2397 }
2398 break;
2399 case SOCKOP_getsockopt:
2400 {
2401 abi_ulong sockfd;
2402 abi_ulong level;
2403 abi_ulong optname;
2404 abi_ulong optval;
2405 socklen_t optlen;
2406
2407 if (get_user_ual(sockfd, vptr)
2408 || get_user_ual(level, vptr + n)
2409 || get_user_ual(optname, vptr + 2 * n)
2410 || get_user_ual(optval, vptr + 3 * n)
2411 || get_user_ual(optlen, vptr + 4 * n))
2412 return -TARGET_EFAULT;
2413
2414 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2415 }
2416 break;
2417 default:
2418 gemu_log("Unsupported socketcall: %d\n", num);
2419 ret = -TARGET_ENOSYS;
2420 break;
2421 }
2422 return ret;
2423 }
2424 #endif
2425
2426 #define N_SHM_REGIONS 32
2427
2428 static struct shm_region {
2429 abi_ulong start;
2430 abi_ulong size;
2431 } shm_regions[N_SHM_REGIONS];
2432
2433 struct target_ipc_perm
2434 {
2435 abi_long __key;
2436 abi_ulong uid;
2437 abi_ulong gid;
2438 abi_ulong cuid;
2439 abi_ulong cgid;
2440 unsigned short int mode;
2441 unsigned short int __pad1;
2442 unsigned short int __seq;
2443 unsigned short int __pad2;
2444 abi_ulong __unused1;
2445 abi_ulong __unused2;
2446 };
2447
2448 struct target_semid_ds
2449 {
2450 struct target_ipc_perm sem_perm;
2451 abi_ulong sem_otime;
2452 abi_ulong __unused1;
2453 abi_ulong sem_ctime;
2454 abi_ulong __unused2;
2455 abi_ulong sem_nsems;
2456 abi_ulong __unused3;
2457 abi_ulong __unused4;
2458 };
2459
2460 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2461 abi_ulong target_addr)
2462 {
2463 struct target_ipc_perm *target_ip;
2464 struct target_semid_ds *target_sd;
2465
2466 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2467 return -TARGET_EFAULT;
2468 target_ip = &(target_sd->sem_perm);
2469 host_ip->__key = tswapal(target_ip->__key);
2470 host_ip->uid = tswapal(target_ip->uid);
2471 host_ip->gid = tswapal(target_ip->gid);
2472 host_ip->cuid = tswapal(target_ip->cuid);
2473 host_ip->cgid = tswapal(target_ip->cgid);
2474 host_ip->mode = tswap16(target_ip->mode);
2475 unlock_user_struct(target_sd, target_addr, 0);
2476 return 0;
2477 }
2478
2479 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2480 struct ipc_perm *host_ip)
2481 {
2482 struct target_ipc_perm *target_ip;
2483 struct target_semid_ds *target_sd;
2484
2485 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2486 return -TARGET_EFAULT;
2487 target_ip = &(target_sd->sem_perm);
2488 target_ip->__key = tswapal(host_ip->__key);
2489 target_ip->uid = tswapal(host_ip->uid);
2490 target_ip->gid = tswapal(host_ip->gid);
2491 target_ip->cuid = tswapal(host_ip->cuid);
2492 target_ip->cgid = tswapal(host_ip->cgid);
2493 target_ip->mode = tswap16(host_ip->mode);
2494 unlock_user_struct(target_sd, target_addr, 1);
2495 return 0;
2496 }
2497
2498 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2499 abi_ulong target_addr)
2500 {
2501 struct target_semid_ds *target_sd;
2502
2503 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2504 return -TARGET_EFAULT;
2505 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2506 return -TARGET_EFAULT;
2507 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2508 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2509 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2510 unlock_user_struct(target_sd, target_addr, 0);
2511 return 0;
2512 }
2513
2514 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2515 struct semid_ds *host_sd)
2516 {
2517 struct target_semid_ds *target_sd;
2518
2519 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2520 return -TARGET_EFAULT;
2521 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2522 return -TARGET_EFAULT;
2523 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2524 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2525 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2526 unlock_user_struct(target_sd, target_addr, 1);
2527 return 0;
2528 }
2529
2530 struct target_seminfo {
2531 int semmap;
2532 int semmni;
2533 int semmns;
2534 int semmnu;
2535 int semmsl;
2536 int semopm;
2537 int semume;
2538 int semusz;
2539 int semvmx;
2540 int semaem;
2541 };
2542
2543 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2544 struct seminfo *host_seminfo)
2545 {
2546 struct target_seminfo *target_seminfo;
2547 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2548 return -TARGET_EFAULT;
2549 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2550 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2551 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2552 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2553 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2554 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2555 __put_user(host_seminfo->semume, &target_seminfo->semume);
2556 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2557 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2558 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2559 unlock_user_struct(target_seminfo, target_addr, 1);
2560 return 0;
2561 }
2562
2563 union semun {
2564 int val;
2565 struct semid_ds *buf;
2566 unsigned short *array;
2567 struct seminfo *__buf;
2568 };
2569
2570 union target_semun {
2571 int val;
2572 abi_ulong buf;
2573 abi_ulong array;
2574 abi_ulong __buf;
2575 };
2576
2577 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2578 abi_ulong target_addr)
2579 {
2580 int nsems;
2581 unsigned short *array;
2582 union semun semun;
2583 struct semid_ds semid_ds;
2584 int i, ret;
2585
2586 semun.buf = &semid_ds;
2587
2588 ret = semctl(semid, 0, IPC_STAT, semun);
2589 if (ret == -1)
2590 return get_errno(ret);
2591
2592 nsems = semid_ds.sem_nsems;
2593
2594 *host_array = malloc(nsems*sizeof(unsigned short));
2595 array = lock_user(VERIFY_READ, target_addr,
2596 nsems*sizeof(unsigned short), 1);
2597 if (!array)
2598 return -TARGET_EFAULT;
2599
2600 for(i=0; i<nsems; i++) {
2601 __get_user((*host_array)[i], &array[i]);
2602 }
2603 unlock_user(array, target_addr, 0);
2604
2605 return 0;
2606 }
2607
2608 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2609 unsigned short **host_array)
2610 {
2611 int nsems;
2612 unsigned short *array;
2613 union semun semun;
2614 struct semid_ds semid_ds;
2615 int i, ret;
2616
2617 semun.buf = &semid_ds;
2618
2619 ret = semctl(semid, 0, IPC_STAT, semun);
2620 if (ret == -1)
2621 return get_errno(ret);
2622
2623 nsems = semid_ds.sem_nsems;
2624
2625 array = lock_user(VERIFY_WRITE, target_addr,
2626 nsems*sizeof(unsigned short), 0);
2627 if (!array)
2628 return -TARGET_EFAULT;
2629
2630 for(i=0; i<nsems; i++) {
2631 __put_user((*host_array)[i], &array[i]);
2632 }
2633 free(*host_array);
2634 unlock_user(array, target_addr, 1);
2635
2636 return 0;
2637 }
2638
2639 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2640 union target_semun target_su)
2641 {
2642 union semun arg;
2643 struct semid_ds dsarg;
2644 unsigned short *array = NULL;
2645 struct seminfo seminfo;
2646 abi_long ret = -TARGET_EINVAL;
2647 abi_long err;
2648 cmd &= 0xff;
2649
2650 switch( cmd ) {
2651 case GETVAL:
2652 case SETVAL:
2653 arg.val = tswap32(target_su.val);
2654 ret = get_errno(semctl(semid, semnum, cmd, arg));
2655 target_su.val = tswap32(arg.val);
2656 break;
2657 case GETALL:
2658 case SETALL:
2659 err = target_to_host_semarray(semid, &array, target_su.array);
2660 if (err)
2661 return err;
2662 arg.array = array;
2663 ret = get_errno(semctl(semid, semnum, cmd, arg));
2664 err = host_to_target_semarray(semid, target_su.array, &array);
2665 if (err)
2666 return err;
2667 break;
2668 case IPC_STAT:
2669 case IPC_SET:
2670 case SEM_STAT:
2671 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2672 if (err)
2673 return err;
2674 arg.buf = &dsarg;
2675 ret = get_errno(semctl(semid, semnum, cmd, arg));
2676 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2677 if (err)
2678 return err;
2679 break;
2680 case IPC_INFO:
2681 case SEM_INFO:
2682 arg.__buf = &seminfo;
2683 ret = get_errno(semctl(semid, semnum, cmd, arg));
2684 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2685 if (err)
2686 return err;
2687 break;
2688 case IPC_RMID:
2689 case GETPID:
2690 case GETNCNT:
2691 case GETZCNT:
2692 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2693 break;
2694 }
2695
2696 return ret;
2697 }
2698
2699 struct target_sembuf {
2700 unsigned short sem_num;
2701 short sem_op;
2702 short sem_flg;
2703 };
2704
2705 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2706 abi_ulong target_addr,
2707 unsigned nsops)
2708 {
2709 struct target_sembuf *target_sembuf;
2710 int i;
2711
2712 target_sembuf = lock_user(VERIFY_READ, target_addr,
2713 nsops*sizeof(struct target_sembuf), 1);
2714 if (!target_sembuf)
2715 return -TARGET_EFAULT;
2716
2717 for(i=0; i<nsops; i++) {
2718 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2719 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2720 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2721 }
2722
2723 unlock_user(target_sembuf, target_addr, 0);
2724
2725 return 0;
2726 }
2727
2728 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2729 {
2730 struct sembuf sops[nsops];
2731
2732 if (target_to_host_sembuf(sops, ptr, nsops))
2733 return -TARGET_EFAULT;
2734
2735 return semop(semid, sops, nsops);
2736 }
2737
2738 struct target_msqid_ds
2739 {
2740 struct target_ipc_perm msg_perm;
2741 abi_ulong msg_stime;
2742 #if TARGET_ABI_BITS == 32
2743 abi_ulong __unused1;
2744 #endif
2745 abi_ulong msg_rtime;
2746 #if TARGET_ABI_BITS == 32
2747 abi_ulong __unused2;
2748 #endif
2749 abi_ulong msg_ctime;
2750 #if TARGET_ABI_BITS == 32
2751 abi_ulong __unused3;
2752 #endif
2753 abi_ulong __msg_cbytes;
2754 abi_ulong msg_qnum;
2755 abi_ulong msg_qbytes;
2756 abi_ulong msg_lspid;
2757 abi_ulong msg_lrpid;
2758 abi_ulong __unused4;
2759 abi_ulong __unused5;
2760 };
2761
2762 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2763 abi_ulong target_addr)
2764 {
2765 struct target_msqid_ds *target_md;
2766
2767 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2768 return -TARGET_EFAULT;
2769 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2770 return -TARGET_EFAULT;
2771 host_md->msg_stime = tswapal(target_md->msg_stime);
2772 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2773 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2774 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2775 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2776 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2777 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2778 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2779 unlock_user_struct(target_md, target_addr, 0);
2780 return 0;
2781 }
2782
2783 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2784 struct msqid_ds *host_md)
2785 {
2786 struct target_msqid_ds *target_md;
2787
2788 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2789 return -TARGET_EFAULT;
2790 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2791 return -TARGET_EFAULT;
2792 target_md->msg_stime = tswapal(host_md->msg_stime);
2793 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2794 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2795 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2796 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2797 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2798 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2799 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2800 unlock_user_struct(target_md, target_addr, 1);
2801 return 0;
2802 }
2803
2804 struct target_msginfo {
2805 int msgpool;
2806 int msgmap;
2807 int msgmax;
2808 int msgmnb;
2809 int msgmni;
2810 int msgssz;
2811 int msgtql;
2812 unsigned short int msgseg;
2813 };
2814
2815 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2816 struct msginfo *host_msginfo)
2817 {
2818 struct target_msginfo *target_msginfo;
2819 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2820 return -TARGET_EFAULT;
2821 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2822 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2823 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2824 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2825 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2826 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2827 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2828 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2829 unlock_user_struct(target_msginfo, target_addr, 1);
2830 return 0;
2831 }
2832
2833 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2834 {
2835 struct msqid_ds dsarg;
2836 struct msginfo msginfo;
2837 abi_long ret = -TARGET_EINVAL;
2838
2839 cmd &= 0xff;
2840
2841 switch (cmd) {
2842 case IPC_STAT:
2843 case IPC_SET:
2844 case MSG_STAT:
2845 if (target_to_host_msqid_ds(&dsarg,ptr))
2846 return -TARGET_EFAULT;
2847 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2848 if (host_to_target_msqid_ds(ptr,&dsarg))
2849 return -TARGET_EFAULT;
2850 break;
2851 case IPC_RMID:
2852 ret = get_errno(msgctl(msgid, cmd, NULL));
2853 break;
2854 case IPC_INFO:
2855 case MSG_INFO:
2856 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2857 if (host_to_target_msginfo(ptr, &msginfo))
2858 return -TARGET_EFAULT;
2859 break;
2860 }
2861
2862 return ret;
2863 }
2864
2865 struct target_msgbuf {
2866 abi_long mtype;
2867 char mtext[1];
2868 };
2869
2870 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2871 unsigned int msgsz, int msgflg)
2872 {
2873 struct target_msgbuf *target_mb;
2874 struct msgbuf *host_mb;
2875 abi_long ret = 0;
2876
2877 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2878 return -TARGET_EFAULT;
2879 host_mb = malloc(msgsz+sizeof(long));
2880 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2881 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2882 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2883 free(host_mb);
2884 unlock_user_struct(target_mb, msgp, 0);
2885
2886 return ret;
2887 }
2888
2889 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2890 unsigned int msgsz, abi_long msgtyp,
2891 int msgflg)
2892 {
2893 struct target_msgbuf *target_mb;
2894 char *target_mtext;
2895 struct msgbuf *host_mb;
2896 abi_long ret = 0;
2897
2898 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2899 return -TARGET_EFAULT;
2900
2901 host_mb = g_malloc(msgsz+sizeof(long));
2902 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
2903
2904 if (ret > 0) {
2905 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2906 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2907 if (!target_mtext) {
2908 ret = -TARGET_EFAULT;
2909 goto end;
2910 }
2911 memcpy(target_mb->mtext, host_mb->mtext, ret);
2912 unlock_user(target_mtext, target_mtext_addr, ret);
2913 }
2914
2915 target_mb->mtype = tswapal(host_mb->mtype);
2916
2917 end:
2918 if (target_mb)
2919 unlock_user_struct(target_mb, msgp, 1);
2920 g_free(host_mb);
2921 return ret;
2922 }
2923
2924 struct target_shmid_ds
2925 {
2926 struct target_ipc_perm shm_perm;
2927 abi_ulong shm_segsz;
2928 abi_ulong shm_atime;
2929 #if TARGET_ABI_BITS == 32
2930 abi_ulong __unused1;
2931 #endif
2932 abi_ulong shm_dtime;
2933 #if TARGET_ABI_BITS == 32
2934 abi_ulong __unused2;
2935 #endif
2936 abi_ulong shm_ctime;
2937 #if TARGET_ABI_BITS == 32
2938 abi_ulong __unused3;
2939 #endif
2940 int shm_cpid;
2941 int shm_lpid;
2942 abi_ulong shm_nattch;
2943 unsigned long int __unused4;
2944 unsigned long int __unused5;
2945 };
2946
2947 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2948 abi_ulong target_addr)
2949 {
2950 struct target_shmid_ds *target_sd;
2951
2952 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2953 return -TARGET_EFAULT;
2954 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2955 return -TARGET_EFAULT;
2956 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2957 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2958 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2959 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2960 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2961 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2962 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2963 unlock_user_struct(target_sd, target_addr, 0);
2964 return 0;
2965 }
2966
2967 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2968 struct shmid_ds *host_sd)
2969 {
2970 struct target_shmid_ds *target_sd;
2971
2972 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2973 return -TARGET_EFAULT;
2974 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2975 return -TARGET_EFAULT;
2976 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2977 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2978 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2979 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2980 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2981 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2982 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2983 unlock_user_struct(target_sd, target_addr, 1);
2984 return 0;
2985 }
2986
2987 struct target_shminfo {
2988 abi_ulong shmmax;
2989 abi_ulong shmmin;
2990 abi_ulong shmmni;
2991 abi_ulong shmseg;
2992 abi_ulong shmall;
2993 };
2994
2995 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2996 struct shminfo *host_shminfo)
2997 {
2998 struct target_shminfo *target_shminfo;
2999 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3000 return -TARGET_EFAULT;
3001 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3002 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3003 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3004 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3005 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3006 unlock_user_struct(target_shminfo, target_addr, 1);
3007 return 0;
3008 }
3009
3010 struct target_shm_info {
3011 int used_ids;
3012 abi_ulong shm_tot;
3013 abi_ulong shm_rss;
3014 abi_ulong shm_swp;
3015 abi_ulong swap_attempts;
3016 abi_ulong swap_successes;
3017 };
3018
3019 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3020 struct shm_info *host_shm_info)
3021 {
3022 struct target_shm_info *target_shm_info;
3023 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3024 return -TARGET_EFAULT;
3025 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3026 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3027 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3028 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3029 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3030 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3031 unlock_user_struct(target_shm_info, target_addr, 1);
3032 return 0;
3033 }
3034
3035 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3036 {
3037 struct shmid_ds dsarg;
3038 struct shminfo shminfo;
3039 struct shm_info shm_info;
3040 abi_long ret = -TARGET_EINVAL;
3041
3042 cmd &= 0xff;
3043
3044 switch(cmd) {
3045 case IPC_STAT:
3046 case IPC_SET:
3047 case SHM_STAT:
3048 if (target_to_host_shmid_ds(&dsarg, buf))
3049 return -TARGET_EFAULT;
3050 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3051 if (host_to_target_shmid_ds(buf, &dsarg))
3052 return -TARGET_EFAULT;
3053 break;
3054 case IPC_INFO:
3055 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3056 if (host_to_target_shminfo(buf, &shminfo))
3057 return -TARGET_EFAULT;
3058 break;
3059 case SHM_INFO:
3060 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3061 if (host_to_target_shm_info(buf, &shm_info))
3062 return -TARGET_EFAULT;
3063 break;
3064 case IPC_RMID:
3065 case SHM_LOCK:
3066 case SHM_UNLOCK:
3067 ret = get_errno(shmctl(shmid, cmd, NULL));
3068 break;
3069 }
3070
3071 return ret;
3072 }
3073
3074 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3075 {
3076 abi_long raddr;
3077 void *host_raddr;
3078 struct shmid_ds shm_info;
3079 int i,ret;
3080
3081 /* find out the length of the shared memory segment */
3082 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3083 if (is_error(ret)) {
3084 /* can't get length, bail out */
3085 return ret;
3086 }
3087
3088 mmap_lock();
3089
3090 if (shmaddr)
3091 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3092 else {
3093 abi_ulong mmap_start;
3094
3095 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3096
3097 if (mmap_start == -1) {
3098 errno = ENOMEM;
3099 host_raddr = (void *)-1;
3100 } else
3101 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3102 }
3103
3104 if (host_raddr == (void *)-1) {
3105 mmap_unlock();
3106 return get_errno((long)host_raddr);
3107 }
3108 raddr=h2g((unsigned long)host_raddr);
3109
3110 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3111 PAGE_VALID | PAGE_READ |
3112 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3113
3114 for (i = 0; i < N_SHM_REGIONS; i++) {
3115 if (shm_regions[i].start == 0) {
3116 shm_regions[i].start = raddr;
3117 shm_regions[i].size = shm_info.shm_segsz;
3118 break;
3119 }
3120 }
3121
3122 mmap_unlock();
3123 return raddr;
3124
3125 }
3126
3127 static inline abi_long do_shmdt(abi_ulong shmaddr)
3128 {
3129 int i;
3130
3131 for (i = 0; i < N_SHM_REGIONS; ++i) {
3132 if (shm_regions[i].start == shmaddr) {
3133 shm_regions[i].start = 0;
3134 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3135 break;
3136 }
3137 }
3138
3139 return get_errno(shmdt(g2h(shmaddr)));
3140 }
3141
3142 #ifdef TARGET_NR_ipc
3143 /* ??? This only works with linear mappings. */
3144 /* do_ipc() must return target values and target errnos. */
3145 static abi_long do_ipc(unsigned int call, int first,
3146 int second, int third,
3147 abi_long ptr, abi_long fifth)
3148 {
3149 int version;
3150 abi_long ret = 0;
3151
3152 version = call >> 16;
3153 call &= 0xffff;
3154
3155 switch (call) {
3156 case IPCOP_semop:
3157 ret = do_semop(first, ptr, second);
3158 break;
3159
3160 case IPCOP_semget:
3161 ret = get_errno(semget(first, second, third));
3162 break;
3163
3164 case IPCOP_semctl:
3165 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3166 break;
3167
3168 case IPCOP_msgget:
3169 ret = get_errno(msgget(first, second));
3170 break;
3171
3172 case IPCOP_msgsnd:
3173 ret = do_msgsnd(first, ptr, second, third);
3174 break;
3175
3176 case IPCOP_msgctl:
3177 ret = do_msgctl(first, second, ptr);
3178 break;
3179
3180 case IPCOP_msgrcv:
3181 switch (version) {
3182 case 0:
3183 {
3184 struct target_ipc_kludge {
3185 abi_long msgp;
3186 abi_long msgtyp;
3187 } *tmp;
3188
3189 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3190 ret = -TARGET_EFAULT;
3191 break;
3192 }
3193
3194 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3195
3196 unlock_user_struct(tmp, ptr, 0);
3197 break;
3198 }
3199 default:
3200 ret = do_msgrcv(first, ptr, second, fifth, third);
3201 }
3202 break;
3203
3204 case IPCOP_shmat:
3205 switch (version) {
3206 default:
3207 {
3208 abi_ulong raddr;
3209 raddr = do_shmat(first, ptr, second);
3210 if (is_error(raddr))
3211 return get_errno(raddr);
3212 if (put_user_ual(raddr, third))
3213 return -TARGET_EFAULT;
3214 break;
3215 }
3216 case 1:
3217 ret = -TARGET_EINVAL;
3218 break;
3219 }
3220 break;
3221 case IPCOP_shmdt:
3222 ret = do_shmdt(ptr);
3223 break;
3224
3225 case IPCOP_shmget:
3226 /* IPC_* flag values are the same on all linux platforms */
3227 ret = get_errno(shmget(first, second, third));
3228 break;
3229
3230 /* IPC_* and SHM_* command values are the same on all linux platforms */
3231 case IPCOP_shmctl:
3232 ret = do_shmctl(first, second, third);
3233 break;
3234 default:
3235 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3236 ret = -TARGET_ENOSYS;
3237 break;
3238 }
3239 return ret;
3240 }
3241 #endif
3242
3243 /* kernel structure types definitions */
3244
3245 #define STRUCT(name, ...) STRUCT_ ## name,
3246 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3247 enum {
3248 #include "syscall_types.h"
3249 };
3250 #undef STRUCT
3251 #undef STRUCT_SPECIAL
3252
3253 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3254 #define STRUCT_SPECIAL(name)
3255 #include "syscall_types.h"
3256 #undef STRUCT
3257 #undef STRUCT_SPECIAL
3258
3259 typedef struct IOCTLEntry IOCTLEntry;
3260
3261 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3262 int fd, abi_long cmd, abi_long arg);
3263
3264 struct IOCTLEntry {
3265 unsigned int target_cmd;
3266 unsigned int host_cmd;
3267 const char *name;
3268 int access;
3269 do_ioctl_fn *do_ioctl;
3270 const argtype arg_type[5];
3271 };
3272
3273 #define IOC_R 0x0001
3274 #define IOC_W 0x0002
3275 #define IOC_RW (IOC_R | IOC_W)
3276
3277 #define MAX_STRUCT_SIZE 4096
3278
3279 #ifdef CONFIG_FIEMAP
3280 /* So fiemap access checks don't overflow on 32 bit systems.
3281 * This is very slightly smaller than the limit imposed by
3282 * the underlying kernel.
3283 */
3284 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3285 / sizeof(struct fiemap_extent))
3286
3287 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3288 int fd, abi_long cmd, abi_long arg)
3289 {
3290 /* The parameter for this ioctl is a struct fiemap followed
3291 * by an array of struct fiemap_extent whose size is set
3292 * in fiemap->fm_extent_count. The array is filled in by the
3293 * ioctl.
3294 */
3295 int target_size_in, target_size_out;
3296 struct fiemap *fm;
3297 const argtype *arg_type = ie->arg_type;
3298 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3299 void *argptr, *p;
3300 abi_long ret;
3301 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3302 uint32_t outbufsz;
3303 int free_fm = 0;
3304
3305 assert(arg_type[0] == TYPE_PTR);
3306 assert(ie->access == IOC_RW);
3307 arg_type++;
3308 target_size_in = thunk_type_size(arg_type, 0);
3309 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3310 if (!argptr) {
3311 return -TARGET_EFAULT;
3312 }
3313 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3314 unlock_user(argptr, arg, 0);
3315 fm = (struct fiemap *)buf_temp;
3316 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3317 return -TARGET_EINVAL;
3318 }
3319
3320 outbufsz = sizeof (*fm) +
3321 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3322
3323 if (outbufsz > MAX_STRUCT_SIZE) {
3324 /* We can't fit all the extents into the fixed size buffer.
3325 * Allocate one that is large enough and use it instead.
3326 */
3327 fm = malloc(outbufsz);
3328 if (!fm) {
3329 return -TARGET_ENOMEM;
3330 }
3331 memcpy(fm, buf_temp, sizeof(struct fiemap));
3332 free_fm = 1;
3333 }
3334 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3335 if (!is_error(ret)) {
3336 target_size_out = target_size_in;
3337 /* An extent_count of 0 means we were only counting the extents
3338 * so there are no structs to copy
3339 */
3340 if (fm->fm_extent_count != 0) {
3341 target_size_out += fm->fm_mapped_extents * extent_size;
3342 }
3343 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3344 if (!argptr) {
3345 ret = -TARGET_EFAULT;
3346 } else {
3347 /* Convert the struct fiemap */
3348 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3349 if (fm->fm_extent_count != 0) {
3350 p = argptr + target_size_in;
3351 /* ...and then all the struct fiemap_extents */
3352 for (i = 0; i < fm->fm_mapped_extents; i++) {
3353 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3354 THUNK_TARGET);
3355 p += extent_size;
3356 }
3357 }
3358 unlock_user(argptr, arg, target_size_out);
3359 }
3360 }
3361 if (free_fm) {
3362 free(fm);
3363 }
3364 return ret;
3365 }
3366 #endif
3367
3368 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3369 int fd, abi_long cmd, abi_long arg)
3370 {
3371 const argtype *arg_type = ie->arg_type;
3372 int target_size;
3373 void *argptr;
3374 int ret;
3375 struct ifconf *host_ifconf;
3376 uint32_t outbufsz;
3377 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3378 int target_ifreq_size;
3379 int nb_ifreq;
3380 int free_buf = 0;
3381 int i;
3382 int target_ifc_len;
3383 abi_long target_ifc_buf;
3384 int host_ifc_len;
3385 char *host_ifc_buf;
3386
3387 assert(arg_type[0] == TYPE_PTR);
3388 assert(ie->access == IOC_RW);
3389
3390 arg_type++;
3391 target_size = thunk_type_size(arg_type, 0);
3392
3393 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3394 if (!argptr)
3395 return -TARGET_EFAULT;
3396 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3397 unlock_user(argptr, arg, 0);
3398
3399 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3400 target_ifc_len = host_ifconf->ifc_len;
3401 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3402
3403 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3404 nb_ifreq = target_ifc_len / target_ifreq_size;
3405 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3406
3407 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3408 if (outbufsz > MAX_STRUCT_SIZE) {
3409 /* We can't fit all the extents into the fixed size buffer.
3410 * Allocate one that is large enough and use it instead.
3411 */
3412 host_ifconf = malloc(outbufsz);
3413 if (!host_ifconf) {
3414 return -TARGET_ENOMEM;
3415 }
3416 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3417 free_buf = 1;
3418 }
3419 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3420
3421 host_ifconf->ifc_len = host_ifc_len;
3422 host_ifconf->ifc_buf = host_ifc_buf;
3423
3424 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3425 if (!is_error(ret)) {
3426 /* convert host ifc_len to target ifc_len */
3427
3428 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3429 target_ifc_len = nb_ifreq * target_ifreq_size;
3430 host_ifconf->ifc_len = target_ifc_len;
3431
3432 /* restore target ifc_buf */
3433
3434 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3435
3436 /* copy struct ifconf to target user */
3437
3438 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3439 if (!argptr)
3440 return -TARGET_EFAULT;
3441 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3442 unlock_user(argptr, arg, target_size);
3443
3444 /* copy ifreq[] to target user */
3445
3446 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3447 for (i = 0; i < nb_ifreq ; i++) {
3448 thunk_convert(argptr + i * target_ifreq_size,
3449 host_ifc_buf + i * sizeof(struct ifreq),
3450 ifreq_arg_type, THUNK_TARGET);
3451 }
3452 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3453 }
3454
3455 if (free_buf) {
3456 free(host_ifconf);
3457 }
3458
3459 return ret;
3460 }
3461
3462 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3463 abi_long cmd, abi_long arg)
3464 {
3465 void *argptr;
3466 struct dm_ioctl *host_dm;
3467 abi_long guest_data;
3468 uint32_t guest_data_size;
3469 int target_size;
3470 const argtype *arg_type = ie->arg_type;
3471 abi_long ret;
3472 void *big_buf = NULL;
3473 char *host_data;
3474
3475 arg_type++;
3476 target_size = thunk_type_size(arg_type, 0);
3477 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3478 if (!argptr) {
3479 ret = -TARGET_EFAULT;
3480 goto out;
3481 }
3482 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3483 unlock_user(argptr, arg, 0);
3484
3485 /* buf_temp is too small, so fetch things into a bigger buffer */
3486 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3487 memcpy(big_buf, buf_temp, target_size);
3488 buf_temp = big_buf;
3489 host_dm = big_buf;
3490
3491 guest_data = arg + host_dm->data_start;
3492 if ((guest_data - arg) < 0) {
3493 ret = -EINVAL;
3494 goto out;
3495 }
3496 guest_data_size = host_dm->data_size - host_dm->data_start;
3497 host_data = (char*)host_dm + host_dm->data_start;
3498
3499 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3500 switch (ie->host_cmd) {
3501 case DM_REMOVE_ALL:
3502 case DM_LIST_DEVICES:
3503 case DM_DEV_CREATE:
3504 case DM_DEV_REMOVE:
3505 case DM_DEV_SUSPEND:
3506 case DM_DEV_STATUS:
3507 case DM_DEV_WAIT:
3508 case DM_TABLE_STATUS:
3509 case DM_TABLE_CLEAR:
3510 case DM_TABLE_DEPS:
3511 case DM_LIST_VERSIONS:
3512 /* no input data */
3513 break;
3514 case DM_DEV_RENAME:
3515 case DM_DEV_SET_GEOMETRY:
3516 /* data contains only strings */
3517 memcpy(host_data, argptr, guest_data_size);
3518 break;
3519 case DM_TARGET_MSG:
3520 memcpy(host_data, argptr, guest_data_size);
3521 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3522 break;
3523 case DM_TABLE_LOAD:
3524 {
3525 void *gspec = argptr;
3526 void *cur_data = host_data;
3527 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3528 int spec_size = thunk_type_size(arg_type, 0);
3529 int i;
3530
3531 for (i = 0; i < host_dm->target_count; i++) {
3532 struct dm_target_spec *spec = cur_data;
3533 uint32_t next;
3534 int slen;
3535
3536 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3537 slen = strlen((char*)gspec + spec_size) + 1;
3538 next = spec->next;
3539 spec->next = sizeof(*spec) + slen;
3540 strcpy((char*)&spec[1], gspec + spec_size);
3541 gspec += next;
3542 cur_data += spec->next;
3543 }
3544 break;
3545 }
3546 default:
3547 ret = -TARGET_EINVAL;
3548 goto out;
3549 }
3550 unlock_user(argptr, guest_data, 0);
3551
3552 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3553 if (!is_error(ret)) {
3554 guest_data = arg + host_dm->data_start;
3555 guest_data_size = host_dm->data_size - host_dm->data_start;
3556 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3557 switch (ie->host_cmd) {
3558 case DM_REMOVE_ALL:
3559 case DM_DEV_CREATE:
3560 case DM_DEV_REMOVE:
3561 case DM_DEV_RENAME:
3562 case DM_DEV_SUSPEND:
3563 case DM_DEV_STATUS:
3564 case DM_TABLE_LOAD:
3565 case DM_TABLE_CLEAR:
3566 case DM_TARGET_MSG:
3567 case DM_DEV_SET_GEOMETRY:
3568 /* no return data */
3569 break;
3570 case DM_LIST_DEVICES:
3571 {
3572 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3573 uint32_t remaining_data = guest_data_size;
3574 void *cur_data = argptr;
3575 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3576 int nl_size = 12; /* can't use thunk_size due to alignment */
3577
3578 while (1) {
3579 uint32_t next = nl->next;
3580 if (next) {
3581 nl->next = nl_size + (strlen(nl->name) + 1);
3582 }
3583 if (remaining_data < nl->next) {
3584 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3585 break;
3586 }
3587 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3588 strcpy(cur_data + nl_size, nl->name);
3589 cur_data += nl->next;
3590 remaining_data -= nl->next;
3591 if (!next) {
3592 break;
3593 }
3594 nl = (void*)nl + next;
3595 }
3596 break;
3597 }
3598 case DM_DEV_WAIT:
3599 case DM_TABLE_STATUS:
3600 {
3601 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3602 void *cur_data = argptr;
3603 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3604 int spec_size = thunk_type_size(arg_type, 0);
3605 int i;
3606
3607 for (i = 0; i < host_dm->target_count; i++) {
3608 uint32_t next = spec->next;
3609 int slen = strlen((char*)&spec[1]) + 1;
3610 spec->next = (cur_data - argptr) + spec_size + slen;
3611 if (guest_data_size < spec->next) {
3612 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3613 break;
3614 }
3615 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3616 strcpy(cur_data + spec_size, (char*)&spec[1]);
3617 cur_data = argptr + spec->next;
3618 spec = (void*)host_dm + host_dm->data_start + next;
3619 }
3620 break;
3621 }
3622 case DM_TABLE_DEPS:
3623 {
3624 void *hdata = (void*)host_dm + host_dm->data_start;
3625 int count = *(uint32_t*)hdata;
3626 uint64_t *hdev = hdata + 8;
3627 uint64_t *gdev = argptr + 8;
3628 int i;
3629
3630 *(uint32_t*)argptr = tswap32(count);
3631 for (i = 0; i < count; i++) {
3632 *gdev = tswap64(*hdev);
3633 gdev++;
3634 hdev++;
3635 }
3636 break;
3637 }
3638 case DM_LIST_VERSIONS:
3639 {
3640 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3641 uint32_t remaining_data = guest_data_size;
3642 void *cur_data = argptr;
3643 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3644 int vers_size = thunk_type_size(arg_type, 0);
3645
3646 while (1) {
3647 uint32_t next = vers->next;
3648 if (next) {
3649 vers->next = vers_size + (strlen(vers->name) + 1);
3650 }
3651 if (remaining_data < vers->next) {
3652 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3653 break;
3654 }
3655 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3656 strcpy(cur_data + vers_size, vers->name);
3657 cur_data += vers->next;
3658 remaining_data -= vers->next;
3659 if (!next) {
3660 break;
3661 }
3662 vers = (void*)vers + next;
3663 }
3664 break;
3665 }
3666 default:
3667 ret = -TARGET_EINVAL;
3668 goto out;
3669 }
3670 unlock_user(argptr, guest_data, guest_data_size);
3671
3672 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3673 if (!argptr) {
3674 ret = -TARGET_EFAULT;
3675 goto out;
3676 }
3677 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3678 unlock_user(argptr, arg, target_size);
3679 }
3680 out:
3681 g_free(big_buf);
3682 return ret;
3683 }
3684
3685 static IOCTLEntry ioctl_entries[] = {
3686 #define IOCTL(cmd, access, ...) \
3687 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3688 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3689 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3690 #include "ioctls.h"
3691 { 0, 0, },
3692 };
3693
3694 /* ??? Implement proper locking for ioctls. */
3695 /* do_ioctl() Must return target values and target errnos. */
3696 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3697 {
3698 const IOCTLEntry *ie;
3699 const argtype *arg_type;
3700 abi_long ret;
3701 uint8_t buf_temp[MAX_STRUCT_SIZE];
3702 int target_size;
3703 void *argptr;
3704
3705 ie = ioctl_entries;
3706 for(;;) {
3707 if (ie->target_cmd == 0) {
3708 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3709 return -TARGET_ENOSYS;
3710 }
3711 if (ie->target_cmd == cmd)
3712 break;
3713 ie++;
3714 }
3715 arg_type = ie->arg_type;
3716 #if defined(DEBUG)
3717 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3718 #endif
3719 if (ie->do_ioctl) {
3720 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3721 }
3722
3723 switch(arg_type[0]) {
3724 case TYPE_NULL:
3725 /* no argument */
3726 ret = get_errno(ioctl(fd, ie->host_cmd));
3727 break;
3728 case TYPE_PTRVOID:
3729 case TYPE_INT:
3730 /* int argment */
3731 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3732 break;
3733 case TYPE_PTR:
3734 arg_type++;
3735 target_size = thunk_type_size(arg_type, 0);
3736 switch(ie->access) {
3737 case IOC_R:
3738 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3739 if (!is_error(ret)) {
3740 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3741 if (!argptr)
3742 return -TARGET_EFAULT;
3743 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3744 unlock_user(argptr, arg, target_size);
3745 }
3746 break;
3747 case IOC_W:
3748 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3749 if (!argptr)
3750 return -TARGET_EFAULT;
3751 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3752 unlock_user(argptr, arg, 0);
3753 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3754 break;
3755 default:
3756 case IOC_RW:
3757 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3758 if (!argptr)
3759 return -TARGET_EFAULT;
3760 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3761 unlock_user(argptr, arg, 0);
3762 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3763 if (!is_error(ret)) {
3764 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3765 if (!argptr)
3766 return -TARGET_EFAULT;
3767 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3768 unlock_user(argptr, arg, target_size);
3769 }
3770 break;
3771 }
3772 break;
3773 default:
3774 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3775 (long)cmd, arg_type[0]);
3776 ret = -TARGET_ENOSYS;
3777 break;
3778 }
3779 return ret;
3780 }
3781
3782 static const bitmask_transtbl iflag_tbl[] = {
3783 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3784 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3785 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3786 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3787 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3788 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3789 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3790 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3791 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3792 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3793 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3794 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3795 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3796 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3797 { 0, 0, 0, 0 }
3798 };
3799
3800 static const bitmask_transtbl oflag_tbl[] = {
3801 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3802 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3803 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3804 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3805 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3806 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3807 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3808 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3809 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3810 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3811 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3812 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3813 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3814 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3815 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3816 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3817 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3818 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3819 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3820 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3821 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3822 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3823 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3824 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3825 { 0, 0, 0, 0 }
3826 };
3827
3828 static const bitmask_transtbl cflag_tbl[] = {
3829 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3830 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3831 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3832 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3833 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3834 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3835 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3836 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3837 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3838 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3839 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3840 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3841 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3842 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3843 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3844 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3845 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3846 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3847 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3848 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3849 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3850 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3851 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3852 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3853 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3854 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3855 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3856 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3857 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3858 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3859 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3860 { 0, 0, 0, 0 }
3861 };
3862
3863 static const bitmask_transtbl lflag_tbl[] = {
3864 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3865 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3866 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3867 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3868 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3869 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3870 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3871 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3872 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3873 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3874 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3875 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3876 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3877 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3878 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3879 { 0, 0, 0, 0 }
3880 };
3881
3882 static void target_to_host_termios (void *dst, const void *src)
3883 {
3884 struct host_termios *host = dst;
3885 const struct target_termios *target = src;
3886
3887 host->c_iflag =
3888 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3889 host->c_oflag =
3890 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3891 host->c_cflag =
3892 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3893 host->c_lflag =
3894 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3895 host->c_line = target->c_line;
3896
3897 memset(host->c_cc, 0, sizeof(host->c_cc));
3898 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3899 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3900 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3901 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3902 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3903 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3904 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3905 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3906 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3907 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3908 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3909 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3910 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3911 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3912 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3913 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3914 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3915 }
3916
3917 static void host_to_target_termios (void *dst, const void *src)
3918 {
3919 struct target_termios *target = dst;
3920 const struct host_termios *host = src;
3921
3922 target->c_iflag =
3923 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3924 target->c_oflag =
3925 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3926 target->c_cflag =
3927 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3928 target->c_lflag =
3929 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3930 target->c_line = host->c_line;
3931
3932 memset(target->c_cc, 0, sizeof(target->c_cc));
3933 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3934 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3935 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3936 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3937 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3938 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3939 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3940 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3941 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3942 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3943 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3944 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3945 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3946 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3947 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3948 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3949 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3950 }
3951
3952 static const StructEntry struct_termios_def = {
3953 .convert = { host_to_target_termios, target_to_host_termios },
3954 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3955 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3956 };
3957
3958 static bitmask_transtbl mmap_flags_tbl[] = {
3959 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3960 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3961 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3962 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3963 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3964 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3965 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3966 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3967 { 0, 0, 0, 0 }
3968 };
3969
3970 #if defined(TARGET_I386)
3971
3972 /* NOTE: there is really one LDT for all the threads */
3973 static uint8_t *ldt_table;
3974
3975 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3976 {
3977 int size;
3978 void *p;
3979
3980 if (!ldt_table)
3981 return 0;
3982 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3983 if (size > bytecount)
3984 size = bytecount;
3985 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3986 if (!p)
3987 return -TARGET_EFAULT;
3988 /* ??? Should this by byteswapped? */
3989 memcpy(p, ldt_table, size);
3990 unlock_user(p, ptr, size);
3991 return size;
3992 }
3993
3994 /* XXX: add locking support */
3995 static abi_long write_ldt(CPUX86State *env,
3996 abi_ulong ptr, unsigned long bytecount, int oldmode)
3997 {
3998 struct target_modify_ldt_ldt_s ldt_info;
3999 struct target_modify_ldt_ldt_s *target_ldt_info;
4000 int seg_32bit, contents, read_exec_only, limit_in_pages;
4001 int seg_not_present, useable, lm;
4002 uint32_t *lp, entry_1, entry_2;
4003
4004 if (bytecount != sizeof(ldt_info))
4005 return -TARGET_EINVAL;
4006 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4007 return -TARGET_EFAULT;
4008 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4009 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4010 ldt_info.limit = tswap32(target_ldt_info->limit);
4011 ldt_info.flags = tswap32(target_ldt_info->flags);
4012 unlock_user_struct(target_ldt_info, ptr, 0);
4013
4014 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4015 return -TARGET_EINVAL;
4016 seg_32bit = ldt_info.flags & 1;
4017 contents = (ldt_info.flags >> 1) & 3;
4018 read_exec_only = (ldt_info.flags >> 3) & 1;
4019 limit_in_pages = (ldt_info.flags >> 4) & 1;
4020 seg_not_present = (ldt_info.flags >> 5) & 1;
4021 useable = (ldt_info.flags >> 6) & 1;
4022 #ifdef TARGET_ABI32
4023 lm = 0;
4024 #else
4025 lm = (ldt_info.flags >> 7) & 1;
4026 #endif
4027 if (contents == 3) {
4028 if (oldmode)
4029 return -TARGET_EINVAL;
4030 if (seg_not_present == 0)
4031 return -TARGET_EINVAL;
4032 }
4033 /* allocate the LDT */
4034 if (!ldt_table) {
4035 env->ldt.base = target_mmap(0,
4036 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4037 PROT_READ|PROT_WRITE,
4038 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4039 if (env->ldt.base == -1)
4040 return -TARGET_ENOMEM;
4041 memset(g2h(env->ldt.base), 0,
4042 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4043 env->ldt.limit = 0xffff;
4044 ldt_table = g2h(env->ldt.base);
4045 }
4046
4047 /* NOTE: same code as Linux kernel */
4048 /* Allow LDTs to be cleared by the user. */
4049 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4050 if (oldmode ||
4051 (contents == 0 &&
4052 read_exec_only == 1 &&
4053 seg_32bit == 0 &&
4054 limit_in_pages == 0 &&
4055 seg_not_present == 1 &&
4056 useable == 0 )) {
4057 entry_1 = 0;
4058 entry_2 = 0;
4059 goto install;
4060 }
4061 }
4062
4063 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4064 (ldt_info.limit & 0x0ffff);
4065 entry_2 = (ldt_info.base_addr & 0xff000000) |
4066 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4067 (ldt_info.limit & 0xf0000) |
4068 ((read_exec_only ^ 1) << 9) |
4069 (contents << 10) |
4070 ((seg_not_present ^ 1) << 15) |
4071 (seg_32bit << 22) |
4072 (limit_in_pages << 23) |
4073 (lm << 21) |
4074 0x7000;
4075 if (!oldmode)
4076 entry_2 |= (useable << 20);
4077
4078 /* Install the new entry ... */
4079 install:
4080 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4081 lp[0] = tswap32(entry_1);
4082 lp[1] = tswap32(entry_2);
4083 return 0;
4084 }
4085
4086 /* specific and weird i386 syscalls */
4087 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4088 unsigned long bytecount)
4089 {
4090 abi_long ret;
4091
4092 switch (func) {
4093 case 0:
4094 ret = read_ldt(ptr, bytecount);
4095 break;
4096 case 1:
4097 ret = write_ldt(env, ptr, bytecount, 1);
4098 break;
4099 case 0x11:
4100 ret = write_ldt(env, ptr, bytecount, 0);
4101 break;
4102 default:
4103 ret = -TARGET_ENOSYS;
4104 break;
4105 }
4106 return ret;
4107 }
4108
4109 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4110 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4111 {
4112 uint64_t *gdt_table = g2h(env->gdt.base);
4113 struct target_modify_ldt_ldt_s ldt_info;
4114 struct target_modify_ldt_ldt_s *target_ldt_info;
4115 int seg_32bit, contents, read_exec_only, limit_in_pages;
4116 int seg_not_present, useable, lm;
4117 uint32_t *lp, entry_1, entry_2;
4118 int i;
4119
4120 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4121 if (!target_ldt_info)
4122 return -TARGET_EFAULT;
4123 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4124 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4125 ldt_info.limit = tswap32(target_ldt_info->limit);
4126 ldt_info.flags = tswap32(target_ldt_info->flags);
4127 if (ldt_info.entry_number == -1) {
4128 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4129 if (gdt_table[i] == 0) {
4130 ldt_info.entry_number = i;
4131 target_ldt_info->entry_number = tswap32(i);
4132 break;
4133 }
4134 }
4135 }
4136 unlock_user_struct(target_ldt_info, ptr, 1);
4137
4138 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4139 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4140 return -TARGET_EINVAL;
4141 seg_32bit = ldt_info.flags & 1;
4142 contents = (ldt_info.flags >> 1) & 3;
4143 read_exec_only = (ldt_info.flags >> 3) & 1;
4144 limit_in_pages = (ldt_info.flags >> 4) & 1;
4145 seg_not_present = (ldt_info.flags >> 5) & 1;
4146 useable = (ldt_info.flags >> 6) & 1;
4147 #ifdef TARGET_ABI32
4148 lm = 0;
4149 #else
4150 lm = (ldt_info.flags >> 7) & 1;
4151 #endif
4152
4153 if (contents == 3) {
4154 if (seg_not_present == 0)
4155 return -TARGET_EINVAL;
4156 }
4157
4158 /* NOTE: same code as Linux kernel */
4159 /* Allow LDTs to be cleared by the user. */
4160 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4161 if ((contents == 0 &&
4162 read_exec_only == 1 &&
4163 seg_32bit == 0 &&
4164 limit_in_pages == 0 &&
4165 seg_not_present == 1 &&
4166 useable == 0 )) {
4167 entry_1 = 0;
4168 entry_2 = 0;
4169 goto install;
4170 }
4171 }
4172
4173 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4174 (ldt_info.limit & 0x0ffff);
4175 entry_2 = (ldt_info.base_addr & 0xff000000) |
4176 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4177 (ldt_info.limit & 0xf0000) |
4178 ((read_exec_only ^ 1) << 9) |
4179 (contents << 10) |
4180 ((seg_not_present ^ 1) << 15) |
4181 (seg_32bit << 22) |
4182 (limit_in_pages << 23) |
4183 (useable << 20) |
4184 (lm << 21) |
4185 0x7000;
4186
4187 /* Install the new entry ... */
4188 install:
4189 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4190 lp[0] = tswap32(entry_1);
4191 lp[1] = tswap32(entry_2);
4192 return 0;
4193 }
4194
4195 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4196 {
4197 struct target_modify_ldt_ldt_s *target_ldt_info;
4198 uint64_t *gdt_table = g2h(env->gdt.base);
4199 uint32_t base_addr, limit, flags;
4200 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4201 int seg_not_present, useable, lm;
4202 uint32_t *lp, entry_1, entry_2;
4203
4204 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4205 if (!target_ldt_info)
4206 return -TARGET_EFAULT;
4207 idx = tswap32(target_ldt_info->entry_number);
4208 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4209 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4210 unlock_user_struct(target_ldt_info, ptr, 1);
4211 return -TARGET_EINVAL;
4212 }
4213 lp = (uint32_t *)(gdt_table + idx);
4214 entry_1 = tswap32(lp[0]);
4215 entry_2 = tswap32(lp[1]);
4216
4217 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4218 contents = (entry_2 >> 10) & 3;
4219 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4220 seg_32bit = (entry_2 >> 22) & 1;
4221 limit_in_pages = (entry_2 >> 23) & 1;
4222 useable = (entry_2 >> 20) & 1;
4223 #ifdef TARGET_ABI32
4224 lm = 0;
4225 #else
4226 lm = (entry_2 >> 21) & 1;
4227 #endif
4228 flags = (seg_32bit << 0) | (contents << 1) |
4229 (read_exec_only << 3) | (limit_in_pages << 4) |
4230 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4231 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4232 base_addr = (entry_1 >> 16) |
4233 (entry_2 & 0xff000000) |
4234 ((entry_2 & 0xff) << 16);
4235 target_ldt_info->base_addr = tswapal(base_addr);
4236 target_ldt_info->limit = tswap32(limit);
4237 target_ldt_info->flags = tswap32(flags);
4238 unlock_user_struct(target_ldt_info, ptr, 1);
4239 return 0;
4240 }
4241 #endif /* TARGET_I386 && TARGET_ABI32 */
4242
4243 #ifndef TARGET_ABI32
4244 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4245 {
4246 abi_long ret = 0;
4247 abi_ulong val;
4248 int idx;
4249
4250 switch(code) {
4251 case TARGET_ARCH_SET_GS:
4252 case TARGET_ARCH_SET_FS:
4253 if (code == TARGET_ARCH_SET_GS)
4254 idx = R_GS;
4255 else
4256 idx = R_FS;
4257 cpu_x86_load_seg(env, idx, 0);
4258 env->segs[idx].base = addr;
4259 break;
4260 case TARGET_ARCH_GET_GS:
4261 case TARGET_ARCH_GET_FS:
4262 if (code == TARGET_ARCH_GET_GS)
4263 idx = R_GS;
4264 else
4265 idx = R_FS;
4266 val = env->segs[idx].base;
4267 if (put_user(val, addr, abi_ulong))
4268 ret = -TARGET_EFAULT;
4269 break;
4270 default:
4271 ret = -TARGET_EINVAL;
4272 break;
4273 }
4274 return ret;
4275 }
4276 #endif
4277
4278 #endif /* defined(TARGET_I386) */
4279
4280 #define NEW_STACK_SIZE 0x40000
4281
4282 #if defined(CONFIG_USE_NPTL)
4283
4284 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4285 typedef struct {
4286 CPUArchState *env;
4287 pthread_mutex_t mutex;
4288 pthread_cond_t cond;
4289 pthread_t thread;
4290 uint32_t tid;
4291 abi_ulong child_tidptr;
4292 abi_ulong parent_tidptr;
4293 sigset_t sigmask;
4294 } new_thread_info;
4295
4296 static void *clone_func(void *arg)
4297 {
4298 new_thread_info *info = arg;
4299 CPUArchState *env;
4300 TaskState *ts;
4301
4302 env = info->env;
4303 thread_env = env;
4304 ts = (TaskState *)thread_env->opaque;
4305 info->tid = gettid();
4306 env->host_tid = info->tid;
4307 task_settid(ts);
4308 if (info->child_tidptr)
4309 put_user_u32(info->tid, info->child_tidptr);
4310 if (info->parent_tidptr)
4311 put_user_u32(info->tid, info->parent_tidptr);
4312 /* Enable signals. */
4313 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4314 /* Signal to the parent that we're ready. */
4315 pthread_mutex_lock(&info->mutex);
4316 pthread_cond_broadcast(&info->cond);
4317 pthread_mutex_unlock(&info->mutex);
4318 /* Wait until the parent has finshed initializing the tls state. */
4319 pthread_mutex_lock(&clone_lock);
4320 pthread_mutex_unlock(&clone_lock);
4321 cpu_loop(env);
4322 /* never exits */
4323 return NULL;
4324 }
4325 #else
4326
4327 static int clone_func(void *arg)
4328 {
4329 CPUArchState *env = arg;
4330 cpu_loop(env);
4331 /* never exits */
4332 return 0;
4333 }
4334 #endif
4335
4336 /* do_fork() Must return host values and target errnos (unlike most
4337 do_*() functions). */
4338 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4339 abi_ulong parent_tidptr, target_ulong newtls,
4340 abi_ulong child_tidptr)
4341 {
4342 int ret;
4343 TaskState *ts;
4344 CPUArchState *new_env;
4345 #if defined(CONFIG_USE_NPTL)
4346 unsigned int nptl_flags;
4347 sigset_t sigmask;
4348 #else
4349 uint8_t *new_stack;
4350 #endif
4351
4352 /* Emulate vfork() with fork() */
4353 if (flags & CLONE_VFORK)
4354 flags &= ~(CLONE_VFORK | CLONE_VM);
4355
4356 if (flags & CLONE_VM) {
4357 TaskState *parent_ts = (TaskState *)env->opaque;
4358 #if defined(CONFIG_USE_NPTL)
4359 new_thread_info info;
4360 pthread_attr_t attr;
4361 #endif
4362 ts = g_malloc0(sizeof(TaskState));
4363 init_task_state(ts);
4364 /* we create a new CPU instance. */
4365 new_env = cpu_copy(env);
4366 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4367 cpu_reset(ENV_GET_CPU(new_env));
4368 #endif
4369 /* Init regs that differ from the parent. */
4370 cpu_clone_regs(new_env, newsp);
4371 new_env->opaque = ts;
4372 ts->bprm = parent_ts->bprm;
4373 ts->info = parent_ts->info;
4374 #if defined(CONFIG_USE_NPTL)
4375 nptl_flags = flags;
4376 flags &= ~CLONE_NPTL_FLAGS2;
4377
4378 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4379 ts->child_tidptr = child_tidptr;
4380 }
4381
4382 if (nptl_flags & CLONE_SETTLS)
4383 cpu_set_tls (new_env, newtls);
4384
4385 /* Grab a mutex so that thread setup appears atomic. */
4386 pthread_mutex_lock(&clone_lock);
4387
4388 memset(&info, 0, sizeof(info));
4389 pthread_mutex_init(&info.mutex, NULL);
4390 pthread_mutex_lock(&info.mutex);
4391 pthread_cond_init(&info.cond, NULL);
4392 info.env = new_env;
4393 if (nptl_flags & CLONE_CHILD_SETTID)
4394 info.child_tidptr = child_tidptr;
4395 if (nptl_flags & CLONE_PARENT_SETTID)
4396 info.parent_tidptr = parent_tidptr;
4397
4398 ret = pthread_attr_init(&attr);
4399 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4400 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4401 /* It is not safe to deliver signals until the child has finished
4402 initializing, so temporarily block all signals. */
4403 sigfillset(&sigmask);
4404 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4405
4406 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4407 /* TODO: Free new CPU state if thread creation failed. */
4408
4409 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4410 pthread_attr_destroy(&attr);
4411 if (ret == 0) {
4412 /* Wait for the child to initialize. */
4413 pthread_cond_wait(&info.cond, &info.mutex);
4414 ret = info.tid;
4415 if (flags & CLONE_PARENT_SETTID)
4416 put_user_u32(ret, parent_tidptr);
4417 } else {
4418 ret = -1;
4419 }
4420 pthread_mutex_unlock(&info.mutex);
4421 pthread_cond_destroy(&info.cond);
4422 pthread_mutex_destroy(&info.mutex);
4423 pthread_mutex_unlock(&clone_lock);
4424 #else
4425 if (flags & CLONE_NPTL_FLAGS2)
4426 return -EINVAL;
4427 /* This is probably going to die very quickly, but do it anyway. */
4428 new_stack = g_malloc0 (NEW_STACK_SIZE);
4429 #ifdef __ia64__
4430 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4431 #else
4432 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4433 #endif
4434 #endif
4435 } else {
4436 /* if no CLONE_VM, we consider it is a fork */
4437 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4438 return -EINVAL;
4439 fork_start();
4440 ret = fork();
4441 if (ret == 0) {
4442 /* Child Process. */
4443 cpu_clone_regs(env, newsp);
4444 fork_end(1);
4445 #if defined(CONFIG_USE_NPTL)
4446 /* There is a race condition here. The parent process could
4447 theoretically read the TID in the child process before the child
4448 tid is set. This would require using either ptrace
4449 (not implemented) or having *_tidptr to point at a shared memory
4450 mapping. We can't repeat the spinlock hack used above because
4451 the child process gets its own copy of the lock. */
4452 if (flags & CLONE_CHILD_SETTID)
4453 put_user_u32(gettid(), child_tidptr);
4454 if (flags & CLONE_PARENT_SETTID)
4455 put_user_u32(gettid(), parent_tidptr);
4456 ts = (TaskState *)env->opaque;
4457 if (flags & CLONE_SETTLS)
4458 cpu_set_tls (env, newtls);
4459 if (flags & CLONE_CHILD_CLEARTID)
4460 ts->child_tidptr = child_tidptr;
4461 #endif
4462 } else {
4463 fork_end(0);
4464 }
4465 }
4466 return ret;
4467 }
4468
4469 /* warning : doesn't handle linux specific flags... */
4470 static int target_to_host_fcntl_cmd(int cmd)
4471 {
4472 switch(cmd) {
4473 case TARGET_F_DUPFD:
4474 case TARGET_F_GETFD:
4475 case TARGET_F_SETFD:
4476 case TARGET_F_GETFL:
4477 case TARGET_F_SETFL:
4478 return cmd;
4479 case TARGET_F_GETLK:
4480 return F_GETLK;
4481 case TARGET_F_SETLK:
4482 return F_SETLK;
4483 case TARGET_F_SETLKW:
4484 return F_SETLKW;
4485 case TARGET_F_GETOWN:
4486 return F_GETOWN;
4487 case TARGET_F_SETOWN:
4488 return F_SETOWN;
4489 case TARGET_F_GETSIG:
4490 return F_GETSIG;
4491 case TARGET_F_SETSIG:
4492 return F_SETSIG;
4493 #if TARGET_ABI_BITS == 32
4494 case TARGET_F_GETLK64:
4495 return F_GETLK64;
4496 case TARGET_F_SETLK64:
4497 return F_SETLK64;
4498 case TARGET_F_SETLKW64:
4499 return F_SETLKW64;
4500 #endif
4501 case TARGET_F_SETLEASE:
4502 return F_SETLEASE;
4503 case TARGET_F_GETLEASE:
4504 return F_GETLEASE;
4505 #ifdef F_DUPFD_CLOEXEC
4506 case TARGET_F_DUPFD_CLOEXEC:
4507 return F_DUPFD_CLOEXEC;
4508 #endif
4509 case TARGET_F_NOTIFY:
4510 return F_NOTIFY;
4511 default:
4512 return -TARGET_EINVAL;
4513 }
4514 return -TARGET_EINVAL;
4515 }
4516
4517 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4518 {
4519 struct flock fl;
4520 struct target_flock *target_fl;
4521 struct flock64 fl64;
4522 struct target_flock64 *target_fl64;
4523 abi_long ret;
4524 int host_cmd = target_to_host_fcntl_cmd(cmd);
4525
4526 if (host_cmd == -TARGET_EINVAL)
4527 return host_cmd;
4528
4529 switch(cmd) {
4530 case TARGET_F_GETLK:
4531 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4532 return -TARGET_EFAULT;
4533 fl.l_type = tswap16(target_fl->l_type);
4534 fl.l_whence = tswap16(target_fl->l_whence);
4535 fl.l_start = tswapal(target_fl->l_start);
4536 fl.l_len = tswapal(target_fl->l_len);
4537 fl.l_pid = tswap32(target_fl->l_pid);
4538 unlock_user_struct(target_fl, arg, 0);
4539 ret = get_errno(fcntl(fd, host_cmd, &fl));
4540 if (ret == 0) {
4541 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4542 return -TARGET_EFAULT;
4543 target_fl->l_type = tswap16(fl.l_type);
4544 target_fl->l_whence = tswap16(fl.l_whence);
4545 target_fl->l_start = tswapal(fl.l_start);
4546 target_fl->l_len = tswapal(fl.l_len);
4547 target_fl->l_pid = tswap32(fl.l_pid);
4548 unlock_user_struct(target_fl, arg, 1);
4549 }
4550 break;
4551
4552 case TARGET_F_SETLK:
4553 case TARGET_F_SETLKW:
4554 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4555 return -TARGET_EFAULT;
4556 fl.l_type = tswap16(target_fl->l_type);
4557 fl.l_whence = tswap16(target_fl->l_whence);
4558 fl.l_start = tswapal(target_fl->l_start);
4559 fl.l_len = tswapal(target_fl->l_len);
4560 fl.l_pid = tswap32(target_fl->l_pid);
4561 unlock_user_struct(target_fl, arg, 0);
4562 ret = get_errno(fcntl(fd, host_cmd, &fl));
4563 break;
4564
4565 case TARGET_F_GETLK64:
4566 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4567 return -TARGET_EFAULT;
4568 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4569 fl64.l_whence = tswap16(target_fl64->l_whence);
4570 fl64.l_start = tswap64(target_fl64->l_start);
4571 fl64.l_len = tswap64(target_fl64->l_len);
4572 fl64.l_pid = tswap32(target_fl64->l_pid);
4573 unlock_user_struct(target_fl64, arg, 0);
4574 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4575 if (ret == 0) {
4576 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4577 return -TARGET_EFAULT;
4578 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4579 target_fl64->l_whence = tswap16(fl64.l_whence);
4580 target_fl64->l_start = tswap64(fl64.l_start);
4581 target_fl64->l_len = tswap64(fl64.l_len);
4582 target_fl64->l_pid = tswap32(fl64.l_pid);
4583 unlock_user_struct(target_fl64, arg, 1);
4584 }
4585 break;
4586 case TARGET_F_SETLK64:
4587 case TARGET_F_SETLKW64:
4588 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4589 return -TARGET_EFAULT;
4590 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4591 fl64.l_whence = tswap16(target_fl64->l_whence);
4592 fl64.l_start = tswap64(target_fl64->l_start);
4593 fl64.l_len = tswap64(target_fl64->l_len);
4594 fl64.l_pid = tswap32(target_fl64->l_pid);
4595 unlock_user_struct(target_fl64, arg, 0);
4596 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4597 break;
4598
4599 case TARGET_F_GETFL:
4600 ret = get_errno(fcntl(fd, host_cmd, arg));
4601 if (ret >= 0) {
4602 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4603 }
4604 break;
4605
4606 case TARGET_F_SETFL:
4607 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4608 break;
4609
4610 case TARGET_F_SETOWN:
4611 case TARGET_F_GETOWN:
4612 case TARGET_F_SETSIG:
4613 case TARGET_F_GETSIG:
4614 case TARGET_F_SETLEASE:
4615 case TARGET_F_GETLEASE:
4616 ret = get_errno(fcntl(fd, host_cmd, arg));
4617 break;
4618
4619 default:
4620 ret = get_errno(fcntl(fd, cmd, arg));
4621 break;
4622 }
4623 return ret;
4624 }
4625
4626 #ifdef USE_UID16
4627
4628 static inline int high2lowuid(int uid)
4629 {
4630 if (uid > 65535)
4631 return 65534;
4632 else
4633 return uid;
4634 }
4635
4636 static inline int high2lowgid(int gid)
4637 {
4638 if (gid > 65535)
4639 return 65534;
4640 else
4641 return gid;
4642 }
4643
4644 static inline int low2highuid(int uid)
4645 {
4646 if ((int16_t)uid == -1)
4647 return -1;
4648 else
4649 return uid;
4650 }
4651
4652 static inline int low2highgid(int gid)
4653 {
4654 if ((int16_t)gid == -1)
4655 return -1;
4656 else
4657 return gid;
4658 }
4659 static inline int tswapid(int id)
4660 {
4661 return tswap16(id);
4662 }
4663 #else /* !USE_UID16 */
4664 static inline int high2lowuid(int uid)
4665 {
4666 return uid;
4667 }
4668 static inline int high2lowgid(int gid)
4669 {
4670 return gid;
4671 }
4672 static inline int low2highuid(int uid)
4673 {
4674 return uid;
4675 }
4676 static inline int low2highgid(int gid)
4677 {
4678 return gid;
4679 }
4680 static inline int tswapid(int id)
4681 {
4682 return tswap32(id);
4683 }
4684 #endif /* USE_UID16 */
4685
4686 void syscall_init(void)
4687 {
4688 IOCTLEntry *ie;
4689 const argtype *arg_type;
4690 int size;
4691 int i;
4692
4693 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4694 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4695 #include "syscall_types.h"
4696 #undef STRUCT
4697 #undef STRUCT_SPECIAL
4698
4699 /* Build target_to_host_errno_table[] table from
4700 * host_to_target_errno_table[]. */
4701 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4702 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4703 }
4704
4705 /* we patch the ioctl size if necessary. We rely on the fact that
4706 no ioctl has all the bits at '1' in the size field */
4707 ie = ioctl_entries;
4708 while (ie->target_cmd != 0) {
4709 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4710 TARGET_IOC_SIZEMASK) {
4711 arg_type = ie->arg_type;
4712 if (arg_type[0] != TYPE_PTR) {
4713 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4714 ie->target_cmd);
4715 exit(1);
4716 }
4717 arg_type++;
4718 size = thunk_type_size(arg_type, 0);
4719 ie->target_cmd = (ie->target_cmd &
4720 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4721 (size << TARGET_IOC_SIZESHIFT);
4722 }
4723
4724 /* automatic consistency check if same arch */
4725 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4726 (defined(__x86_64__) && defined(TARGET_X86_64))
4727 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4728 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4729 ie->name, ie->target_cmd, ie->host_cmd);
4730 }
4731 #endif
4732 ie++;
4733 }
4734 }
4735
4736 #if TARGET_ABI_BITS == 32
4737 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4738 {
4739 #ifdef TARGET_WORDS_BIGENDIAN
4740 return ((uint64_t)word0 << 32) | word1;
4741 #else
4742 return ((uint64_t)word1 << 32) | word0;
4743 #endif
4744 }
4745 #else /* TARGET_ABI_BITS == 32 */
4746 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4747 {
4748 return word0;
4749 }
4750 #endif /* TARGET_ABI_BITS != 32 */
4751
4752 #ifdef TARGET_NR_truncate64
4753 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4754 abi_long arg2,
4755 abi_long arg3,
4756 abi_long arg4)
4757 {
4758 if (regpairs_aligned(cpu_env)) {
4759 arg2 = arg3;
4760 arg3 = arg4;
4761 }
4762 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4763 }
4764 #endif
4765
4766 #ifdef TARGET_NR_ftruncate64
4767 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4768 abi_long arg2,
4769 abi_long arg3,
4770 abi_long arg4)
4771 {
4772 if (regpairs_aligned(cpu_env)) {
4773 arg2 = arg3;
4774 arg3 = arg4;
4775 }
4776 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4777 }
4778 #endif
4779
4780 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4781 abi_ulong target_addr)
4782 {
4783 struct target_timespec *target_ts;
4784
4785 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4786 return -TARGET_EFAULT;
4787 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4788 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4789 unlock_user_struct(target_ts, target_addr, 0);
4790 return 0;
4791 }
4792
4793 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4794 struct timespec *host_ts)
4795 {
4796 struct target_timespec *target_ts;
4797
4798 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4799 return -TARGET_EFAULT;
4800 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4801 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4802 unlock_user_struct(target_ts, target_addr, 1);
4803 return 0;
4804 }
4805
4806 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4807 static inline abi_long host_to_target_stat64(void *cpu_env,
4808 abi_ulong target_addr,
4809 struct stat *host_st)
4810 {
4811 #ifdef TARGET_ARM
4812 if (((CPUARMState *)cpu_env)->eabi) {
4813 struct target_eabi_stat64 *target_st;
4814
4815 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4816 return -TARGET_EFAULT;
4817 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4818 __put_user(host_st->st_dev, &target_st->st_dev);
4819 __put_user(host_st->st_ino, &target_st->st_ino);
4820 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4821 __put_user(host_st->st_ino, &target_st->__st_ino);
4822 #endif
4823 __put_user(host_st->st_mode, &target_st->st_mode);
4824 __put_user(host_st->st_nlink, &target_st->st_nlink);
4825 __put_user(host_st->st_uid, &target_st->st_uid);
4826 __put_user(host_st->st_gid, &target_st->st_gid);
4827 __put_user(host_st->st_rdev, &target_st->st_rdev);
4828 __put_user(host_st->st_size, &target_st->st_size);
4829 __put_user(host_st->st_blksize, &target_st->st_blksize);
4830 __put_user(host_st->st_blocks, &target_st->st_blocks);
4831 __put_user(host_st->st_atime, &target_st->target_st_atime);
4832 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4833 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4834 unlock_user_struct(target_st, target_addr, 1);
4835 } else
4836 #endif
4837 {
4838 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4839 struct target_stat *target_st;
4840 #else
4841 struct target_stat64 *target_st;
4842 #endif
4843
4844 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4845 return -TARGET_EFAULT;
4846 memset(target_st, 0, sizeof(*target_st));
4847 __put_user(host_st->st_dev, &target_st->st_dev);
4848 __put_user(host_st->st_ino, &target_st->st_ino);
4849 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4850 __put_user(host_st->st_ino, &target_st->__st_ino);
4851 #endif
4852 __put_user(host_st->st_mode, &target_st->st_mode);
4853 __put_user(host_st->st_nlink, &target_st->st_nlink);
4854 __put_user(host_st->st_uid, &target_st->st_uid);
4855 __put_user(host_st->st_gid, &target_st->st_gid);
4856 __put_user(host_st->st_rdev, &target_st->st_rdev);
4857 /* XXX: better use of kernel struct */
4858 __put_user(host_st->st_size, &target_st->st_size);
4859 __put_user(host_st->st_blksize, &target_st->st_blksize);
4860 __put_user(host_st->st_blocks, &target_st->st_blocks);
4861 __put_user(host_st->st_atime, &target_st->target_st_atime);
4862 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4863 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4864 unlock_user_struct(target_st, target_addr, 1);
4865 }
4866
4867 return 0;
4868 }
4869 #endif
4870
4871 #if defined(CONFIG_USE_NPTL)
4872 /* ??? Using host futex calls even when target atomic operations
4873 are not really atomic probably breaks things. However implementing
4874 futexes locally would make futexes shared between multiple processes
4875 tricky. However they're probably useless because guest atomic
4876 operations won't work either. */
4877 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4878 target_ulong uaddr2, int val3)
4879 {
4880 struct timespec ts, *pts;
4881 int base_op;
4882
4883 /* ??? We assume FUTEX_* constants are the same on both host
4884 and target. */
4885 #ifdef FUTEX_CMD_MASK
4886 base_op = op & FUTEX_CMD_MASK;
4887 #else
4888 base_op = op;
4889 #endif
4890 switch (base_op) {
4891 case FUTEX_WAIT:
4892 if (timeout) {
4893 pts = &ts;
4894 target_to_host_timespec(pts, timeout);
4895 } else {
4896 pts = NULL;
4897 }
4898 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4899 pts, NULL, 0));
4900 case FUTEX_WAKE:
4901 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4902 case FUTEX_FD:
4903 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4904 case FUTEX_REQUEUE:
4905 case FUTEX_CMP_REQUEUE:
4906 case FUTEX_WAKE_OP:
4907 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4908 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4909 But the prototype takes a `struct timespec *'; insert casts
4910 to satisfy the compiler. We do not need to tswap TIMEOUT
4911 since it's not compared to guest memory. */
4912 pts = (struct timespec *)(uintptr_t) timeout;
4913 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4914 g2h(uaddr2),
4915 (base_op == FUTEX_CMP_REQUEUE
4916 ? tswap32(val3)
4917 : val3)));
4918 default:
4919 return -TARGET_ENOSYS;
4920 }
4921 }
4922 #endif
4923
4924 /* Map host to target signal numbers for the wait family of syscalls.
4925 Assume all other status bits are the same. */
4926 int host_to_target_waitstatus(int status)
4927 {
4928 if (WIFSIGNALED(status)) {
4929 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4930 }
4931 if (WIFSTOPPED(status)) {
4932 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4933 | (status & 0xff);
4934 }
4935 return status;
4936 }
4937
4938 int get_osversion(void)
4939 {
4940 static int osversion;
4941 struct new_utsname buf;
4942 const char *s;
4943 int i, n, tmp;
4944 if (osversion)
4945 return osversion;
4946 if (qemu_uname_release && *qemu_uname_release) {
4947 s = qemu_uname_release;
4948 } else {
4949 if (sys_uname(&buf))
4950 return 0;
4951 s = buf.release;
4952 }
4953 tmp = 0;
4954 for (i = 0; i < 3; i++) {
4955 n = 0;
4956 while (*s >= '0' && *s <= '9') {
4957 n *= 10;
4958 n += *s - '0';
4959 s++;
4960 }
4961 tmp = (tmp << 8) + n;
4962 if (*s == '.')
4963 s++;
4964 }
4965 osversion = tmp;
4966 return osversion;
4967 }
4968
4969
4970 static int open_self_maps(void *cpu_env, int fd)
4971 {
4972 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4973 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4974 #endif
4975 FILE *fp;
4976 char *line = NULL;
4977 size_t len = 0;
4978 ssize_t read;
4979
4980 fp = fopen("/proc/self/maps", "r");
4981 if (fp == NULL) {
4982 return -EACCES;
4983 }
4984
4985 while ((read = getline(&line, &len, fp)) != -1) {
4986 int fields, dev_maj, dev_min, inode;
4987 uint64_t min, max, offset;
4988 char flag_r, flag_w, flag_x, flag_p;
4989 char path[512] = "";
4990 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4991 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4992 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4993
4994 if ((fields < 10) || (fields > 11)) {
4995 continue;
4996 }
4997 if (!strncmp(path, "[stack]", 7)) {
4998 continue;
4999 }
5000 if (h2g_valid(min) && h2g_valid(max)) {
5001 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5002 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
5003 h2g(min), h2g(max), flag_r, flag_w,
5004 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5005 path[0] ? " " : "", path);
5006 }
5007 }
5008
5009 free(line);
5010 fclose(fp);
5011
5012 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5013 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5014 (unsigned long long)ts->info->stack_limit,
5015 (unsigned long long)(ts->info->start_stack +
5016 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
5017 (unsigned long long)0);
5018 #endif
5019
5020 return 0;
5021 }
5022
5023 static int open_self_stat(void *cpu_env, int fd)
5024 {
5025 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5026 abi_ulong start_stack = ts->info->start_stack;
5027 int i;
5028
5029 for (i = 0; i < 44; i++) {
5030 char buf[128];
5031 int len;
5032 uint64_t val = 0;
5033
5034 if (i == 0) {
5035 /* pid */
5036 val = getpid();
5037 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5038 } else if (i == 1) {
5039 /* app name */
5040 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5041 } else if (i == 27) {
5042 /* stack bottom */
5043 val = start_stack;
5044 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5045 } else {
5046 /* for the rest, there is MasterCard */
5047 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5048 }
5049
5050 len = strlen(buf);
5051 if (write(fd, buf, len) != len) {
5052 return -1;
5053 }
5054 }
5055
5056 return 0;
5057 }
5058
5059 static int open_self_auxv(void *cpu_env, int fd)
5060 {
5061 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5062 abi_ulong auxv = ts->info->saved_auxv;
5063 abi_ulong len = ts->info->auxv_len;
5064 char *ptr;
5065
5066 /*
5067 * Auxiliary vector is stored in target process stack.
5068 * read in whole auxv vector and copy it to file
5069 */
5070 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5071 if (ptr != NULL) {
5072 while (len > 0) {
5073 ssize_t r;
5074 r = write(fd, ptr, len);
5075 if (r <= 0) {
5076 break;
5077 }
5078 len -= r;
5079 ptr += r;
5080 }
5081 lseek(fd, 0, SEEK_SET);
5082 unlock_user(ptr, auxv, len);
5083 }
5084
5085 return 0;
5086 }
5087
5088 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5089 {
5090 struct fake_open {
5091 const char *filename;
5092 int (*fill)(void *cpu_env, int fd);
5093 };
5094 const struct fake_open *fake_open;
5095 static const struct fake_open fakes[] = {
5096 { "/proc/self/maps", open_self_maps },
5097 { "/proc/self/stat", open_self_stat },
5098 { "/proc/self/auxv", open_self_auxv },
5099 { NULL, NULL }
5100 };
5101
5102 for (fake_open = fakes; fake_open->filename; fake_open++) {
5103 if (!strncmp(pathname, fake_open->filename,
5104 strlen(fake_open->filename))) {
5105 break;
5106 }
5107 }
5108
5109 if (fake_open->filename) {
5110 const char *tmpdir;
5111 char filename[PATH_MAX];
5112 int fd, r;
5113
5114 /* create temporary file to map stat to */
5115 tmpdir = getenv("TMPDIR");
5116 if (!tmpdir)
5117 tmpdir = "/tmp";
5118 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5119 fd = mkstemp(filename);
5120 if (fd < 0) {
5121 return fd;
5122 }
5123 unlink(filename);
5124
5125 if ((r = fake_open->fill(cpu_env, fd))) {
5126 close(fd);
5127 return r;
5128 }
5129 lseek(fd, 0, SEEK_SET);
5130
5131 return fd;
5132 }
5133
5134 return get_errno(open(path(pathname), flags, mode));
5135 }
5136
5137 /* do_syscall() should always have a single exit point at the end so
5138 that actions, such as logging of syscall results, can be performed.
5139 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5140 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5141 abi_long arg2, abi_long arg3, abi_long arg4,
5142 abi_long arg5, abi_long arg6, abi_long arg7,
5143 abi_long arg8)
5144 {
5145 abi_long ret;
5146 struct stat st;
5147 struct statfs stfs;
5148 void *p;
5149
5150 #ifdef DEBUG
5151 gemu_log("syscall %d", num);
5152 #endif
5153 if(do_strace)
5154 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5155
5156 switch(num) {
5157 case TARGET_NR_exit:
5158 #ifdef CONFIG_USE_NPTL
5159 /* In old applications this may be used to implement _exit(2).
5160 However in threaded applictions it is used for thread termination,
5161 and _exit_group is used for application termination.
5162 Do thread termination if we have more then one thread. */
5163 /* FIXME: This probably breaks if a signal arrives. We should probably
5164 be disabling signals. */
5165 if (first_cpu->next_cpu) {
5166 TaskState *ts;
5167 CPUArchState **lastp;
5168 CPUArchState *p;
5169
5170 cpu_list_lock();
5171 lastp = &first_cpu;
5172 p = first_cpu;
5173 while (p && p != (CPUArchState *)cpu_env) {
5174 lastp = &p->next_cpu;
5175 p = p->next_cpu;
5176 }
5177 /* If we didn't find the CPU for this thread then something is
5178 horribly wrong. */
5179 if (!p)
5180 abort();
5181 /* Remove the CPU from the list. */
5182 *lastp = p->next_cpu;
5183 cpu_list_unlock();
5184 ts = ((CPUArchState *)cpu_env)->opaque;
5185 if (ts->child_tidptr) {
5186 put_user_u32(0, ts->child_tidptr);
5187 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5188 NULL, NULL, 0);
5189 }
5190 thread_env = NULL;
5191 object_delete(OBJECT(ENV_GET_CPU(cpu_env)));
5192 g_free(ts);
5193 pthread_exit(NULL);
5194 }
5195 #endif
5196 #ifdef TARGET_GPROF
5197 _mcleanup();
5198 #endif
5199 gdb_exit(cpu_env, arg1);
5200 _exit(arg1);
5201 ret = 0; /* avoid warning */
5202 break;
5203 case TARGET_NR_read:
5204 if (arg3 == 0)
5205 ret = 0;
5206 else {
5207 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5208 goto efault;
5209 ret = get_errno(read(arg1, p, arg3));
5210 unlock_user(p, arg2, ret);
5211 }
5212 break;
5213 case TARGET_NR_write:
5214 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5215 goto efault;
5216 ret = get_errno(write(arg1, p, arg3));
5217 unlock_user(p, arg2, 0);
5218 break;
5219 case TARGET_NR_open:
5220 if (!(p = lock_user_string(arg1)))
5221 goto efault;
5222 ret = get_errno(do_open(cpu_env, p,
5223 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5224 arg3));
5225 unlock_user(p, arg1, 0);
5226 break;
5227 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5228 case TARGET_NR_openat:
5229 if (!(p = lock_user_string(arg2)))
5230 goto efault;
5231 ret = get_errno(sys_openat(arg1,
5232 path(p),
5233 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5234 arg4));
5235 unlock_user(p, arg2, 0);
5236 break;
5237 #endif
5238 case TARGET_NR_close:
5239 ret = get_errno(close(arg1));
5240 break;
5241 case TARGET_NR_brk:
5242 ret = do_brk(arg1);
5243 break;
5244 case TARGET_NR_fork:
5245 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5246 break;
5247 #ifdef TARGET_NR_waitpid
5248 case TARGET_NR_waitpid:
5249 {
5250 int status;
5251 ret = get_errno(waitpid(arg1, &status, arg3));
5252 if (!is_error(ret) && arg2 && ret
5253 && put_user_s32(host_to_target_waitstatus(status), arg2))
5254 goto efault;
5255 }
5256 break;
5257 #endif
5258 #ifdef TARGET_NR_waitid
5259 case TARGET_NR_waitid:
5260 {
5261 siginfo_t info;
5262 info.si_pid = 0;
5263 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5264 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5265 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5266 goto efault;
5267 host_to_target_siginfo(p, &info);
5268 unlock_user(p, arg3, sizeof(target_siginfo_t));
5269 }
5270 }
5271 break;
5272 #endif
5273 #ifdef TARGET_NR_creat /* not on alpha */
5274 case TARGET_NR_creat:
5275 if (!(p = lock_user_string(arg1)))
5276 goto efault;
5277 ret = get_errno(creat(p, arg2));
5278 unlock_user(p, arg1, 0);
5279 break;
5280 #endif
5281 case TARGET_NR_link:
5282 {
5283 void * p2;
5284 p = lock_user_string(arg1);
5285 p2 = lock_user_string(arg2);
5286 if (!p || !p2)
5287 ret = -TARGET_EFAULT;
5288 else
5289 ret = get_errno(link(p, p2));
5290 unlock_user(p2, arg2, 0);
5291 unlock_user(p, arg1, 0);
5292 }
5293 break;
5294 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5295 case TARGET_NR_linkat:
5296 {
5297 void * p2 = NULL;
5298 if (!arg2 || !arg4)
5299 goto efault;
5300 p = lock_user_string(arg2);
5301 p2 = lock_user_string(arg4);
5302 if (!p || !p2)
5303 ret = -TARGET_EFAULT;
5304 else
5305 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
5306 unlock_user(p, arg2, 0);
5307 unlock_user(p2, arg4, 0);
5308 }
5309 break;
5310 #endif
5311 case TARGET_NR_unlink:
5312 if (!(p = lock_user_string(arg1)))
5313 goto efault;
5314 ret = get_errno(unlink(p));
5315 unlock_user(p, arg1, 0);
5316 break;
5317 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5318 case TARGET_NR_unlinkat:
5319 if (!(p = lock_user_string(arg2)))
5320 goto efault;
5321 ret = get_errno(sys_unlinkat(arg1, p, arg3));
5322 unlock_user(p, arg2, 0);
5323 break;
5324 #endif
5325 case TARGET_NR_execve:
5326 {
5327 char **argp, **envp;
5328 int argc, envc;
5329 abi_ulong gp;
5330 abi_ulong guest_argp;
5331 abi_ulong guest_envp;
5332 abi_ulong addr;
5333 char **q;
5334 int total_size = 0;
5335
5336 argc = 0;
5337 guest_argp = arg2;
5338 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5339 if (get_user_ual(addr, gp))
5340 goto efault;
5341 if (!addr)
5342 break;
5343 argc++;
5344 }
5345 envc = 0;
5346 guest_envp = arg3;
5347 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5348 if (get_user_ual(addr, gp))
5349 goto efault;
5350 if (!addr)
5351 break;
5352 envc++;
5353 }
5354
5355 argp = alloca((argc + 1) * sizeof(void *));
5356 envp = alloca((envc + 1) * sizeof(void *));
5357
5358 for (gp = guest_argp, q = argp; gp;
5359 gp += sizeof(abi_ulong), q++) {
5360 if (get_user_ual(addr, gp))
5361 goto execve_efault;
5362 if (!addr)
5363 break;
5364 if (!(*q = lock_user_string(addr)))
5365 goto execve_efault;
5366 total_size += strlen(*q) + 1;
5367 }
5368 *q = NULL;
5369
5370 for (gp = guest_envp, q = envp; gp;
5371 gp += sizeof(abi_ulong), q++) {
5372 if (get_user_ual(addr, gp))
5373 goto execve_efault;
5374 if (!addr)
5375 break;
5376 if (!(*q = lock_user_string(addr)))
5377 goto execve_efault;
5378 total_size += strlen(*q) + 1;
5379 }
5380 *q = NULL;
5381
5382 /* This case will not be caught by the host's execve() if its
5383 page size is bigger than the target's. */
5384 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5385 ret = -TARGET_E2BIG;
5386 goto execve_end;
5387 }
5388 if (!(p = lock_user_string(arg1)))
5389 goto execve_efault;
5390 ret = get_errno(execve(p, argp, envp));
5391 unlock_user(p, arg1, 0);
5392
5393 goto execve_end;
5394
5395 execve_efault:
5396 ret = -TARGET_EFAULT;
5397
5398 execve_end:
5399 for (gp = guest_argp, q = argp; *q;
5400 gp += sizeof(abi_ulong), q++) {
5401 if (get_user_ual(addr, gp)
5402 || !addr)
5403 break;
5404 unlock_user(*q, addr, 0);
5405 }
5406 for (gp = guest_envp, q = envp; *q;
5407 gp += sizeof(abi_ulong), q++) {
5408 if (get_user_ual(addr, gp)
5409 || !addr)
5410 break;
5411 unlock_user(*q, addr, 0);
5412 }
5413 }
5414 break;
5415 case TARGET_NR_chdir:
5416 if (!(p = lock_user_string(arg1)))
5417 goto efault;
5418 ret = get_errno(chdir(p));
5419 unlock_user(p, arg1, 0);
5420 break;
5421 #ifdef TARGET_NR_time
5422 case TARGET_NR_time:
5423 {
5424 time_t host_time;
5425 ret = get_errno(time(&host_time));
5426 if (!is_error(ret)
5427 && arg1
5428 && put_user_sal(host_time, arg1))
5429 goto efault;
5430 }
5431 break;
5432 #endif
5433 case TARGET_NR_mknod:
5434 if (!(p = lock_user_string(arg1)))
5435 goto efault;
5436 ret = get_errno(mknod(p, arg2, arg3));
5437 unlock_user(p, arg1, 0);
5438 break;
5439 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5440 case TARGET_NR_mknodat:
5441 if (!(p = lock_user_string(arg2)))
5442 goto efault;
5443 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5444 unlock_user(p, arg2, 0);
5445 break;
5446 #endif
5447 case TARGET_NR_chmod:
5448 if (!(p = lock_user_string(arg1)))
5449 goto efault;
5450 ret = get_errno(chmod(p, arg2));
5451 unlock_user(p, arg1, 0);
5452 break;
5453 #ifdef TARGET_NR_break
5454 case TARGET_NR_break:
5455 goto unimplemented;
5456 #endif
5457 #ifdef TARGET_NR_oldstat
5458 case TARGET_NR_oldstat:
5459 goto unimplemented;
5460 #endif
5461 case TARGET_NR_lseek:
5462 ret = get_errno(lseek(arg1, arg2, arg3));
5463 break;
5464 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5465 /* Alpha specific */
5466 case TARGET_NR_getxpid:
5467 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5468 ret = get_errno(getpid());
5469 break;
5470 #endif
5471 #ifdef TARGET_NR_getpid
5472 case TARGET_NR_getpid:
5473 ret = get_errno(getpid());
5474 break;
5475 #endif
5476 case TARGET_NR_mount:
5477 {
5478 /* need to look at the data field */
5479 void *p2, *p3;
5480 p = lock_user_string(arg1);
5481 p2 = lock_user_string(arg2);
5482 p3 = lock_user_string(arg3);
5483 if (!p || !p2 || !p3)
5484 ret = -TARGET_EFAULT;
5485 else {
5486 /* FIXME - arg5 should be locked, but it isn't clear how to
5487 * do that since it's not guaranteed to be a NULL-terminated
5488 * string.
5489 */
5490 if ( ! arg5 )
5491 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5492 else
5493 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5494 }
5495 unlock_user(p, arg1, 0);
5496 unlock_user(p2, arg2, 0);
5497 unlock_user(p3, arg3, 0);
5498 break;
5499 }
5500 #ifdef TARGET_NR_umount
5501 case TARGET_NR_umount:
5502 if (!(p = lock_user_string(arg1)))
5503 goto efault;
5504 ret = get_errno(umount(p));
5505 unlock_user(p, arg1, 0);
5506 break;
5507 #endif
5508 #ifdef TARGET_NR_stime /* not on alpha */
5509 case TARGET_NR_stime:
5510 {
5511 time_t host_time;
5512 if (get_user_sal(host_time, arg1))
5513 goto efault;
5514 ret = get_errno(stime(&host_time));
5515 }
5516 break;
5517 #endif
5518 case TARGET_NR_ptrace:
5519 goto unimplemented;
5520 #ifdef TARGET_NR_alarm /* not on alpha */
5521 case TARGET_NR_alarm:
5522 ret = alarm(arg1);
5523 break;
5524 #endif
5525 #ifdef TARGET_NR_oldfstat
5526 case TARGET_NR_oldfstat:
5527 goto unimplemented;
5528 #endif
5529 #ifdef TARGET_NR_pause /* not on alpha */
5530 case TARGET_NR_pause:
5531 ret = get_errno(pause());
5532 break;
5533 #endif
5534 #ifdef TARGET_NR_utime
5535 case TARGET_NR_utime:
5536 {
5537 struct utimbuf tbuf, *host_tbuf;
5538 struct target_utimbuf *target_tbuf;
5539 if (arg2) {
5540 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5541 goto efault;
5542 tbuf.actime = tswapal(target_tbuf->actime);
5543 tbuf.modtime = tswapal(target_tbuf->modtime);
5544 unlock_user_struct(target_tbuf, arg2, 0);
5545 host_tbuf = &tbuf;
5546 } else {
5547 host_tbuf = NULL;
5548 }
5549 if (!(p = lock_user_string(arg1)))
5550 goto efault;
5551 ret = get_errno(utime(p, host_tbuf));
5552 unlock_user(p, arg1, 0);
5553 }
5554 break;
5555 #endif
5556 case TARGET_NR_utimes:
5557 {
5558 struct timeval *tvp, tv[2];
5559 if (arg2) {
5560 if (copy_from_user_timeval(&tv[0], arg2)
5561 || copy_from_user_timeval(&tv[1],
5562 arg2 + sizeof(struct target_timeval)))
5563 goto efault;
5564 tvp = tv;
5565 } else {
5566 tvp = NULL;
5567 }
5568 if (!(p = lock_user_string(arg1)))
5569 goto efault;
5570 ret = get_errno(utimes(p, tvp));
5571 unlock_user(p, arg1, 0);
5572 }
5573 break;
5574 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5575 case TARGET_NR_futimesat:
5576 {
5577 struct timeval *tvp, tv[2];
5578 if (arg3) {
5579 if (copy_from_user_timeval(&tv[0], arg3)
5580 || copy_from_user_timeval(&tv[1],
5581 arg3 + sizeof(struct target_timeval)))
5582 goto efault;
5583 tvp = tv;
5584 } else {
5585 tvp = NULL;
5586 }
5587 if (!(p = lock_user_string(arg2)))
5588 goto efault;
5589 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5590 unlock_user(p, arg2, 0);
5591 }
5592 break;
5593 #endif
5594 #ifdef TARGET_NR_stty
5595 case TARGET_NR_stty:
5596 goto unimplemented;
5597 #endif
5598 #ifdef TARGET_NR_gtty
5599 case TARGET_NR_gtty:
5600 goto unimplemented;
5601 #endif
5602 case TARGET_NR_access:
5603 if (!(p = lock_user_string(arg1)))
5604 goto efault;
5605 ret = get_errno(access(path(p), arg2));
5606 unlock_user(p, arg1, 0);
5607 break;
5608 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5609 case TARGET_NR_faccessat:
5610 if (!(p = lock_user_string(arg2)))
5611 goto efault;
5612 ret = get_errno(sys_faccessat(arg1, p, arg3));
5613 unlock_user(p, arg2, 0);
5614 break;
5615 #endif
5616 #ifdef TARGET_NR_nice /* not on alpha */
5617 case TARGET_NR_nice:
5618 ret = get_errno(nice(arg1));
5619 break;
5620 #endif
5621 #ifdef TARGET_NR_ftime
5622 case TARGET_NR_ftime:
5623 goto unimplemented;
5624 #endif
5625 case TARGET_NR_sync:
5626 sync();
5627 ret = 0;
5628 break;
5629 case TARGET_NR_kill:
5630 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5631 break;
5632 case TARGET_NR_rename:
5633 {
5634 void *p2;
5635 p = lock_user_string(arg1);
5636 p2 = lock_user_string(arg2);
5637 if (!p || !p2)
5638 ret = -TARGET_EFAULT;
5639 else
5640 ret = get_errno(rename(p, p2));
5641 unlock_user(p2, arg2, 0);
5642 unlock_user(p, arg1, 0);
5643 }
5644 break;
5645 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5646 case TARGET_NR_renameat:
5647 {
5648 void *p2;
5649 p = lock_user_string(arg2);
5650 p2 = lock_user_string(arg4);
5651 if (!p || !p2)
5652 ret = -TARGET_EFAULT;
5653 else
5654 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5655 unlock_user(p2, arg4, 0);
5656 unlock_user(p, arg2, 0);
5657 }
5658 break;
5659 #endif
5660 case TARGET_NR_mkdir:
5661 if (!(p = lock_user_string(arg1)))
5662 goto efault;
5663 ret = get_errno(mkdir(p, arg2));
5664 unlock_user(p, arg1, 0);
5665 break;
5666 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5667 case TARGET_NR_mkdirat:
5668 if (!(p = lock_user_string(arg2)))
5669 goto efault;
5670 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5671 unlock_user(p, arg2, 0);
5672 break;
5673 #endif
5674 case TARGET_NR_rmdir:
5675 if (!(p = lock_user_string(arg1)))
5676 goto efault;
5677 ret = get_errno(rmdir(p));
5678 unlock_user(p, arg1, 0);
5679 break;
5680 case TARGET_NR_dup:
5681 ret = get_errno(dup(arg1));
5682 break;
5683 case TARGET_NR_pipe:
5684 ret = do_pipe(cpu_env, arg1, 0, 0);
5685 break;
5686 #ifdef TARGET_NR_pipe2
5687 case TARGET_NR_pipe2:
5688 ret = do_pipe(cpu_env, arg1,
5689 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5690 break;
5691 #endif
5692 case TARGET_NR_times:
5693 {
5694 struct target_tms *tmsp;
5695 struct tms tms;
5696 ret = get_errno(times(&tms));
5697 if (arg1) {
5698 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5699 if (!tmsp)
5700 goto efault;
5701 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5702 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5703 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5704 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5705 }
5706 if (!is_error(ret))
5707 ret = host_to_target_clock_t(ret);
5708 }
5709 break;
5710 #ifdef TARGET_NR_prof
5711 case TARGET_NR_prof:
5712 goto unimplemented;
5713 #endif
5714 #ifdef TARGET_NR_signal
5715 case TARGET_NR_signal:
5716 goto unimplemented;
5717 #endif
5718 case TARGET_NR_acct:
5719 if (arg1 == 0) {
5720 ret = get_errno(acct(NULL));
5721 } else {
5722 if (!(p = lock_user_string(arg1)))
5723 goto efault;
5724 ret = get_errno(acct(path(p)));
5725 unlock_user(p, arg1, 0);
5726 }
5727 break;
5728 #ifdef TARGET_NR_umount2 /* not on alpha */
5729 case TARGET_NR_umount2:
5730 if (!(p = lock_user_string(arg1)))
5731 goto efault;
5732 ret = get_errno(umount2(p, arg2));
5733 unlock_user(p, arg1, 0);
5734 break;
5735 #endif
5736 #ifdef TARGET_NR_lock
5737 case TARGET_NR_lock:
5738 goto unimplemented;
5739 #endif
5740 case TARGET_NR_ioctl:
5741 ret = do_ioctl(arg1, arg2, arg3);
5742 break;
5743 case TARGET_NR_fcntl:
5744 ret = do_fcntl(arg1, arg2, arg3);
5745 break;
5746 #ifdef TARGET_NR_mpx
5747 case TARGET_NR_mpx:
5748 goto unimplemented;
5749 #endif
5750 case TARGET_NR_setpgid:
5751 ret = get_errno(setpgid(arg1, arg2));
5752 break;
5753 #ifdef TARGET_NR_ulimit
5754 case TARGET_NR_ulimit:
5755 goto unimplemented;
5756 #endif
5757 #ifdef TARGET_NR_oldolduname
5758 case TARGET_NR_oldolduname:
5759 goto unimplemented;
5760 #endif
5761 case TARGET_NR_umask:
5762 ret = get_errno(umask(arg1));
5763 break;
5764 case TARGET_NR_chroot:
5765 if (!(p = lock_user_string(arg1)))
5766 goto efault;
5767 ret = get_errno(chroot(p));
5768 unlock_user(p, arg1, 0);
5769 break;
5770 case TARGET_NR_ustat:
5771 goto unimplemented;
5772 case TARGET_NR_dup2:
5773 ret = get_errno(dup2(arg1, arg2));
5774 break;
5775 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5776 case TARGET_NR_dup3:
5777 ret = get_errno(dup3(arg1, arg2, arg3));
5778 break;
5779 #endif
5780 #ifdef TARGET_NR_getppid /* not on alpha */
5781 case TARGET_NR_getppid:
5782 ret = get_errno(getppid());
5783 break;
5784 #endif
5785 case TARGET_NR_getpgrp:
5786 ret = get_errno(getpgrp());
5787 break;
5788 case TARGET_NR_setsid:
5789 ret = get_errno(setsid());
5790 break;
5791 #ifdef TARGET_NR_sigaction
5792 case TARGET_NR_sigaction:
5793 {
5794 #if defined(TARGET_ALPHA)
5795 struct target_sigaction act, oact, *pact = 0;
5796 struct target_old_sigaction *old_act;
5797 if (arg2) {
5798 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5799 goto efault;
5800 act._sa_handler = old_act->_sa_handler;
5801 target_siginitset(&act.sa_mask, old_act->sa_mask);
5802 act.sa_flags = old_act->sa_flags;
5803 act.sa_restorer = 0;
5804 unlock_user_struct(old_act, arg2, 0);
5805 pact = &act;
5806 }
5807 ret = get_errno(do_sigaction(arg1, pact, &oact));
5808 if (!is_error(ret) && arg3) {
5809 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5810 goto efault;
5811 old_act->_sa_handler = oact._sa_handler;
5812 old_act->sa_mask = oact.sa_mask.sig[0];
5813 old_act->sa_flags = oact.sa_flags;
5814 unlock_user_struct(old_act, arg3, 1);
5815 }
5816 #elif defined(TARGET_MIPS)
5817 struct target_sigaction act, oact, *pact, *old_act;
5818
5819 if (arg2) {
5820 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5821 goto efault;
5822 act._sa_handler = old_act->_sa_handler;
5823 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5824 act.sa_flags = old_act->sa_flags;
5825 unlock_user_struct(old_act, arg2, 0);
5826 pact = &act;
5827 } else {
5828 pact = NULL;
5829 }
5830
5831 ret = get_errno(do_sigaction(arg1, pact, &oact));
5832
5833 if (!is_error(ret) && arg3) {
5834 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5835 goto efault;
5836 old_act->_sa_handler = oact._sa_handler;
5837 old_act->sa_flags = oact.sa_flags;
5838 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5839 old_act->sa_mask.sig[1] = 0;
5840 old_act->sa_mask.sig[2] = 0;
5841 old_act->sa_mask.sig[3] = 0;
5842 unlock_user_struct(old_act, arg3, 1);
5843 }
5844 #else
5845 struct target_old_sigaction *old_act;
5846 struct target_sigaction act, oact, *pact;
5847 if (arg2) {
5848 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5849 goto efault;
5850 act._sa_handler = old_act->_sa_handler;
5851 target_siginitset(&act.sa_mask, old_act->sa_mask);
5852 act.sa_flags = old_act->sa_flags;
5853 act.sa_restorer = old_act->sa_restorer;
5854 unlock_user_struct(old_act, arg2, 0);
5855 pact = &act;
5856 } else {
5857 pact = NULL;
5858 }
5859 ret = get_errno(do_sigaction(arg1, pact, &oact));
5860 if (!is_error(ret) && arg3) {
5861 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5862 goto efault;
5863 old_act->_sa_handler = oact._sa_handler;
5864 old_act->sa_mask = oact.sa_mask.sig[0];
5865 old_act->sa_flags = oact.sa_flags;
5866 old_act->sa_restorer = oact.sa_restorer;
5867 unlock_user_struct(old_act, arg3, 1);
5868 }
5869 #endif
5870 }
5871 break;
5872 #endif
5873 case TARGET_NR_rt_sigaction:
5874 {
5875 #if defined(TARGET_ALPHA)
5876 struct target_sigaction act, oact, *pact = 0;
5877 struct target_rt_sigaction *rt_act;
5878 /* ??? arg4 == sizeof(sigset_t). */
5879 if (arg2) {
5880 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5881 goto efault;
5882 act._sa_handler = rt_act->_sa_handler;
5883 act.sa_mask = rt_act->sa_mask;
5884 act.sa_flags = rt_act->sa_flags;
5885 act.sa_restorer = arg5;
5886 unlock_user_struct(rt_act, arg2, 0);
5887 pact = &act;
5888 }
5889 ret = get_errno(do_sigaction(arg1, pact, &oact));
5890 if (!is_error(ret) && arg3) {
5891 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5892 goto efault;
5893 rt_act->_sa_handler = oact._sa_handler;
5894 rt_act->sa_mask = oact.sa_mask;
5895 rt_act->sa_flags = oact.sa_flags;
5896 unlock_user_struct(rt_act, arg3, 1);
5897 }
5898 #else
5899 struct target_sigaction *act;
5900 struct target_sigaction *oact;
5901
5902 if (arg2) {
5903 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5904 goto efault;
5905 } else
5906 act = NULL;
5907 if (arg3) {
5908 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5909 ret = -TARGET_EFAULT;
5910 goto rt_sigaction_fail;
5911 }
5912 } else
5913 oact = NULL;
5914 ret = get_errno(do_sigaction(arg1, act, oact));
5915 rt_sigaction_fail:
5916 if (act)
5917 unlock_user_struct(act, arg2, 0);
5918 if (oact)
5919 unlock_user_struct(oact, arg3, 1);
5920 #endif
5921 }
5922 break;
5923 #ifdef TARGET_NR_sgetmask /* not on alpha */
5924 case TARGET_NR_sgetmask:
5925 {
5926 sigset_t cur_set;
5927 abi_ulong target_set;
5928 sigprocmask(0, NULL, &cur_set);
5929 host_to_target_old_sigset(&target_set, &cur_set);
5930 ret = target_set;
5931 }
5932 break;
5933 #endif
5934 #ifdef TARGET_NR_ssetmask /* not on alpha */
5935 case TARGET_NR_ssetmask:
5936 {
5937 sigset_t set, oset, cur_set;
5938 abi_ulong target_set = arg1;
5939 sigprocmask(0, NULL, &cur_set);
5940 target_to_host_old_sigset(&set, &target_set);
5941 sigorset(&set, &set, &cur_set);
5942 sigprocmask(SIG_SETMASK, &set, &oset);
5943 host_to_target_old_sigset(&target_set, &oset);
5944 ret = target_set;
5945 }
5946 break;
5947 #endif
5948 #ifdef TARGET_NR_sigprocmask
5949 case TARGET_NR_sigprocmask:
5950 {
5951 #if defined(TARGET_ALPHA)
5952 sigset_t set, oldset;
5953 abi_ulong mask;
5954 int how;
5955
5956 switch (arg1) {
5957 case TARGET_SIG_BLOCK:
5958 how = SIG_BLOCK;
5959 break;
5960 case TARGET_SIG_UNBLOCK:
5961 how = SIG_UNBLOCK;
5962 break;
5963 case TARGET_SIG_SETMASK:
5964 how = SIG_SETMASK;
5965 break;
5966 default:
5967 ret = -TARGET_EINVAL;
5968 goto fail;
5969 }
5970 mask = arg2;
5971 target_to_host_old_sigset(&set, &mask);
5972
5973 ret = get_errno(sigprocmask(how, &set, &oldset));
5974 if (!is_error(ret)) {
5975 host_to_target_old_sigset(&mask, &oldset);
5976 ret = mask;
5977 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5978 }
5979 #else
5980 sigset_t set, oldset, *set_ptr;
5981 int how;
5982
5983 if (arg2) {
5984 switch (arg1) {
5985 case TARGET_SIG_BLOCK:
5986 how = SIG_BLOCK;
5987 break;
5988 case TARGET_SIG_UNBLOCK:
5989 how = SIG_UNBLOCK;
5990 break;
5991 case TARGET_SIG_SETMASK:
5992 how = SIG_SETMASK;
5993 break;
5994 default:
5995 ret = -TARGET_EINVAL;
5996 goto fail;
5997 }
5998 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5999 goto efault;
6000 target_to_host_old_sigset(&set, p);
6001 unlock_user(p, arg2, 0);
6002 set_ptr = &set;
6003 } else {
6004 how = 0;
6005 set_ptr = NULL;
6006 }
6007 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6008 if (!is_error(ret) && arg3) {
6009 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6010 goto efault;
6011 host_to_target_old_sigset(p, &oldset);
6012 unlock_user(p, arg3, sizeof(target_sigset_t));
6013 }
6014 #endif
6015 }
6016 break;
6017 #endif
6018 case TARGET_NR_rt_sigprocmask:
6019 {
6020 int how = arg1;
6021 sigset_t set, oldset, *set_ptr;
6022
6023 if (arg2) {
6024 switch(how) {
6025 case TARGET_SIG_BLOCK:
6026 how = SIG_BLOCK;
6027 break;
6028 case TARGET_SIG_UNBLOCK:
6029 how = SIG_UNBLOCK;
6030 break;
6031 case TARGET_SIG_SETMASK:
6032 how = SIG_SETMASK;
6033 break;
6034 default:
6035 ret = -TARGET_EINVAL;
6036 goto fail;
6037 }
6038 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6039 goto efault;
6040 target_to_host_sigset(&set, p);
6041 unlock_user(p, arg2, 0);
6042 set_ptr = &set;
6043 } else {
6044 how = 0;
6045 set_ptr = NULL;
6046 }
6047 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6048 if (!is_error(ret) && arg3) {
6049 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6050 goto efault;
6051 host_to_target_sigset(p, &oldset);
6052 unlock_user(p, arg3, sizeof(target_sigset_t));
6053 }
6054 }
6055 break;
6056 #ifdef TARGET_NR_sigpending
6057 case TARGET_NR_sigpending:
6058 {
6059 sigset_t set;
6060 ret = get_errno(sigpending(&set));
6061 if (!is_error(ret)) {
6062 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6063 goto efault;
6064 host_to_target_old_sigset(p, &set);
6065 unlock_user(p, arg1, sizeof(target_sigset_t));
6066 }
6067 }
6068 break;
6069 #endif
6070 case TARGET_NR_rt_sigpending:
6071 {
6072 sigset_t set;
6073 ret = get_errno(sigpending(&set));
6074 if (!is_error(ret)) {
6075 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6076 goto efault;
6077 host_to_target_sigset(p, &set);
6078 unlock_user(p, arg1, sizeof(target_sigset_t));
6079 }
6080 }
6081 break;
6082 #ifdef TARGET_NR_sigsuspend
6083 case TARGET_NR_sigsuspend:
6084 {
6085 sigset_t set;
6086 #if defined(TARGET_ALPHA)
6087 abi_ulong mask = arg1;
6088 target_to_host_old_sigset(&set, &mask);
6089 #else
6090 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6091 goto efault;
6092 target_to_host_old_sigset(&set, p);
6093 unlock_user(p, arg1, 0);
6094 #endif
6095 ret = get_errno(sigsuspend(&set));
6096 }
6097 break;
6098 #endif
6099 case TARGET_NR_rt_sigsuspend:
6100 {
6101 sigset_t set;
6102 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6103 goto efault;
6104 target_to_host_sigset(&set, p);
6105 unlock_user(p, arg1, 0);
6106 ret = get_errno(sigsuspend(&set));
6107 }
6108 break;
6109 case TARGET_NR_rt_sigtimedwait:
6110 {
6111 sigset_t set;
6112 struct timespec uts, *puts;
6113 siginfo_t uinfo;
6114
6115 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6116 goto efault;
6117 target_to_host_sigset(&set, p);
6118 unlock_user(p, arg1, 0);
6119 if (arg3) {
6120 puts = &uts;
6121 target_to_host_timespec(puts, arg3);
6122 } else {
6123 puts = NULL;
6124 }
6125 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6126 if (!is_error(ret) && arg2) {
6127 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6128 goto efault;
6129 host_to_target_siginfo(p, &uinfo);
6130 unlock_user(p, arg2, sizeof(target_siginfo_t));
6131 }
6132 }
6133 break;
6134 case TARGET_NR_rt_sigqueueinfo:
6135 {
6136 siginfo_t uinfo;
6137 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6138 goto efault;
6139 target_to_host_siginfo(&uinfo, p);
6140 unlock_user(p, arg1, 0);
6141 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6142 }
6143 break;
6144 #ifdef TARGET_NR_sigreturn
6145 case TARGET_NR_sigreturn:
6146 /* NOTE: ret is eax, so not transcoding must be done */
6147 ret = do_sigreturn(cpu_env);
6148 break;
6149 #endif
6150 case TARGET_NR_rt_sigreturn:
6151 /* NOTE: ret is eax, so not transcoding must be done */
6152 ret = do_rt_sigreturn(cpu_env);
6153 break;
6154 case TARGET_NR_sethostname:
6155 if (!(p = lock_user_string(arg1)))
6156 goto efault;
6157 ret = get_errno(sethostname(p, arg2));
6158 unlock_user(p, arg1, 0);
6159 break;
6160 case TARGET_NR_setrlimit:
6161 {
6162 int resource = target_to_host_resource(arg1);
6163 struct target_rlimit *target_rlim;
6164 struct rlimit rlim;
6165 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6166 goto efault;
6167 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6168 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6169 unlock_user_struct(target_rlim, arg2, 0);
6170 ret = get_errno(setrlimit(resource, &rlim));
6171 }
6172 break;
6173 case TARGET_NR_getrlimit:
6174 {
6175 int resource = target_to_host_resource(arg1);
6176 struct target_rlimit *target_rlim;
6177 struct rlimit rlim;
6178
6179 ret = get_errno(getrlimit(resource, &rlim));
6180 if (!is_error(ret)) {
6181 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6182 goto efault;
6183 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6184 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6185 unlock_user_struct(target_rlim, arg2, 1);
6186 }
6187 }
6188 break;
6189 case TARGET_NR_getrusage:
6190 {
6191 struct rusage rusage;
6192 ret = get_errno(getrusage(arg1, &rusage));
6193 if (!is_error(ret)) {
6194 host_to_target_rusage(arg2, &rusage);
6195 }
6196 }
6197 break;
6198 case TARGET_NR_gettimeofday:
6199 {
6200 struct timeval tv;
6201 ret = get_errno(gettimeofday(&tv, NULL));
6202 if (!is_error(ret)) {
6203 if (copy_to_user_timeval(arg1, &tv))
6204 goto efault;
6205 }
6206 }
6207 break;
6208 case TARGET_NR_settimeofday:
6209 {
6210 struct timeval tv;
6211 if (copy_from_user_timeval(&tv, arg1))
6212 goto efault;
6213 ret = get_errno(settimeofday(&tv, NULL));
6214 }
6215 break;
6216 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
6217 case TARGET_NR_select:
6218 {
6219 struct target_sel_arg_struct *sel;
6220 abi_ulong inp, outp, exp, tvp;
6221 long nsel;
6222
6223 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6224 goto efault;
6225 nsel = tswapal(sel->n);
6226 inp = tswapal(sel->inp);
6227 outp = tswapal(sel->outp);
6228 exp = tswapal(sel->exp);
6229 tvp = tswapal(sel->tvp);
6230 unlock_user_struct(sel, arg1, 0);
6231 ret = do_select(nsel, inp, outp, exp, tvp);
6232 }
6233 break;
6234 #endif
6235 #ifdef TARGET_NR_pselect6
6236 case TARGET_NR_pselect6:
6237 {
6238 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6239 fd_set rfds, wfds, efds;
6240 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6241 struct timespec ts, *ts_ptr;
6242
6243 /*
6244 * The 6th arg is actually two args smashed together,
6245 * so we cannot use the C library.
6246 */
6247 sigset_t set;
6248 struct {
6249 sigset_t *set;
6250 size_t size;
6251 } sig, *sig_ptr;
6252
6253 abi_ulong arg_sigset, arg_sigsize, *arg7;
6254 target_sigset_t *target_sigset;
6255
6256 n = arg1;
6257 rfd_addr = arg2;
6258 wfd_addr = arg3;
6259 efd_addr = arg4;
6260 ts_addr = arg5;
6261
6262 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6263 if (ret) {
6264 goto fail;
6265 }
6266 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6267 if (ret) {
6268 goto fail;
6269 }
6270 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6271 if (ret) {
6272 goto fail;
6273 }
6274
6275 /*
6276 * This takes a timespec, and not a timeval, so we cannot
6277 * use the do_select() helper ...
6278 */
6279 if (ts_addr) {
6280 if (target_to_host_timespec(&ts, ts_addr)) {
6281 goto efault;
6282 }
6283 ts_ptr = &ts;
6284 } else {
6285 ts_ptr = NULL;
6286 }
6287
6288 /* Extract the two packed args for the sigset */
6289 if (arg6) {
6290 sig_ptr = &sig;
6291 sig.size = _NSIG / 8;
6292
6293 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6294 if (!arg7) {
6295 goto efault;
6296 }
6297 arg_sigset = tswapal(arg7[0]);
6298 arg_sigsize = tswapal(arg7[1]);
6299 unlock_user(arg7, arg6, 0);
6300
6301 if (arg_sigset) {
6302 sig.set = &set;
6303 if (arg_sigsize != sizeof(*target_sigset)) {
6304 /* Like the kernel, we enforce correct size sigsets */
6305 ret = -TARGET_EINVAL;
6306 goto fail;
6307 }
6308 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6309 sizeof(*target_sigset), 1);
6310 if (!target_sigset) {
6311 goto efault;
6312 }
6313 target_to_host_sigset(&set, target_sigset);
6314 unlock_user(target_sigset, arg_sigset, 0);
6315 } else {
6316 sig.set = NULL;
6317 }
6318 } else {
6319 sig_ptr = NULL;
6320 }
6321
6322 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6323 ts_ptr, sig_ptr));
6324
6325 if (!is_error(ret)) {
6326 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6327 goto efault;
6328 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6329 goto efault;
6330 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6331 goto efault;
6332
6333 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6334 goto efault;
6335 }
6336 }
6337 break;
6338 #endif
6339 case TARGET_NR_symlink:
6340 {
6341 void *p2;
6342 p = lock_user_string(arg1);
6343 p2 = lock_user_string(arg2);
6344 if (!p || !p2)
6345 ret = -TARGET_EFAULT;
6346 else
6347 ret = get_errno(symlink(p, p2));
6348 unlock_user(p2, arg2, 0);
6349 unlock_user(p, arg1, 0);
6350 }
6351 break;
6352 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6353 case TARGET_NR_symlinkat:
6354 {
6355 void *p2;
6356 p = lock_user_string(arg1);
6357 p2 = lock_user_string(arg3);
6358 if (!p || !p2)
6359 ret = -TARGET_EFAULT;
6360 else
6361 ret = get_errno(sys_symlinkat(p, arg2, p2));
6362 unlock_user(p2, arg3, 0);
6363 unlock_user(p, arg1, 0);
6364 }
6365 break;
6366 #endif
6367 #ifdef TARGET_NR_oldlstat
6368 case TARGET_NR_oldlstat:
6369 goto unimplemented;
6370 #endif
6371 case TARGET_NR_readlink:
6372 {
6373 void *p2, *temp;
6374 p = lock_user_string(arg1);
6375 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6376 if (!p || !p2)
6377 ret = -TARGET_EFAULT;
6378 else {
6379 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
6380 char real[PATH_MAX];
6381 temp = realpath(exec_path,real);
6382 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
6383 snprintf((char *)p2, arg3, "%s", real);
6384 }
6385 else
6386 ret = get_errno(readlink(path(p), p2, arg3));
6387 }
6388 unlock_user(p2, arg2, ret);
6389 unlock_user(p, arg1, 0);
6390 }
6391 break;
6392 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6393 case TARGET_NR_readlinkat:
6394 {
6395 void *p2;
6396 p = lock_user_string(arg2);
6397 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6398 if (!p || !p2)
6399 ret = -TARGET_EFAULT;
6400 else
6401 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6402 unlock_user(p2, arg3, ret);
6403 unlock_user(p, arg2, 0);
6404 }
6405 break;
6406 #endif
6407 #ifdef TARGET_NR_uselib
6408 case TARGET_NR_uselib:
6409 goto unimplemented;
6410 #endif
6411 #ifdef TARGET_NR_swapon
6412 case TARGET_NR_swapon:
6413 if (!(p = lock_user_string(arg1)))
6414 goto efault;
6415 ret = get_errno(swapon(p, arg2));
6416 unlock_user(p, arg1, 0);
6417 break;
6418 #endif
6419 case TARGET_NR_reboot:
6420 if (!(p = lock_user_string(arg4)))
6421 goto efault;
6422 ret = reboot(arg1, arg2, arg3, p);
6423 unlock_user(p, arg4, 0);
6424 break;
6425 #ifdef TARGET_NR_readdir
6426 case TARGET_NR_readdir:
6427 goto unimplemented;
6428 #endif
6429 #ifdef TARGET_NR_mmap
6430 case TARGET_NR_mmap:
6431 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6432 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6433 || defined(TARGET_S390X)
6434 {
6435 abi_ulong *v;
6436 abi_ulong v1, v2, v3, v4, v5, v6;
6437 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6438 goto efault;
6439 v1 = tswapal(v[0]);
6440 v2 = tswapal(v[1]);
6441 v3 = tswapal(v[2]);
6442 v4 = tswapal(v[3]);
6443 v5 = tswapal(v[4]);
6444 v6 = tswapal(v[5]);
6445 unlock_user(v, arg1, 0);
6446 ret = get_errno(target_mmap(v1, v2, v3,
6447 target_to_host_bitmask(v4, mmap_flags_tbl),
6448 v5, v6));
6449 }
6450 #else
6451 ret = get_errno(target_mmap(arg1, arg2, arg3,
6452 target_to_host_bitmask(arg4, mmap_flags_tbl),
6453 arg5,
6454 arg6));
6455 #endif
6456 break;
6457 #endif
6458 #ifdef TARGET_NR_mmap2
6459 case TARGET_NR_mmap2:
6460 #ifndef MMAP_SHIFT
6461 #define MMAP_SHIFT 12
6462 #endif
6463 ret = get_errno(target_mmap(arg1, arg2, arg3,
6464 target_to_host_bitmask(arg4, mmap_flags_tbl),
6465 arg5,
6466 arg6 << MMAP_SHIFT));
6467 break;
6468 #endif
6469 case TARGET_NR_munmap:
6470 ret = get_errno(target_munmap(arg1, arg2));
6471 break;
6472 case TARGET_NR_mprotect:
6473 {
6474 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6475 /* Special hack to detect libc making the stack executable. */
6476 if ((arg3 & PROT_GROWSDOWN)
6477 && arg1 >= ts->info->stack_limit
6478 && arg1 <= ts->info->start_stack) {
6479 arg3 &= ~PROT_GROWSDOWN;
6480 arg2 = arg2 + arg1 - ts->info->stack_limit;
6481 arg1 = ts->info->stack_limit;
6482 }
6483 }
6484 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6485 break;
6486 #ifdef TARGET_NR_mremap
6487 case TARGET_NR_mremap:
6488 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6489 break;
6490 #endif
6491 /* ??? msync/mlock/munlock are broken for softmmu. */
6492 #ifdef TARGET_NR_msync
6493 case TARGET_NR_msync:
6494 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6495 break;
6496 #endif
6497 #ifdef TARGET_NR_mlock
6498 case TARGET_NR_mlock:
6499 ret = get_errno(mlock(g2h(arg1), arg2));
6500 break;
6501 #endif
6502 #ifdef TARGET_NR_munlock
6503 case TARGET_NR_munlock:
6504 ret = get_errno(munlock(g2h(arg1), arg2));
6505 break;
6506 #endif
6507 #ifdef TARGET_NR_mlockall
6508 case TARGET_NR_mlockall:
6509 ret = get_errno(mlockall(arg1));
6510 break;
6511 #endif
6512 #ifdef TARGET_NR_munlockall
6513 case TARGET_NR_munlockall:
6514 ret = get_errno(munlockall());
6515 break;
6516 #endif
6517 case TARGET_NR_truncate:
6518 if (!(p = lock_user_string(arg1)))
6519 goto efault;
6520 ret = get_errno(truncate(p, arg2));
6521 unlock_user(p, arg1, 0);
6522 break;
6523 case TARGET_NR_ftruncate:
6524 ret = get_errno(ftruncate(arg1, arg2));
6525 break;
6526 case TARGET_NR_fchmod:
6527 ret = get_errno(fchmod(arg1, arg2));
6528 break;
6529 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6530 case TARGET_NR_fchmodat:
6531 if (!(p = lock_user_string(arg2)))
6532 goto efault;
6533 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6534 unlock_user(p, arg2, 0);
6535 break;
6536 #endif
6537 case TARGET_NR_getpriority:
6538 /* Note that negative values are valid for getpriority, so we must
6539 differentiate based on errno settings. */
6540 errno = 0;
6541 ret = getpriority(arg1, arg2);
6542 if (ret == -1 && errno != 0) {
6543 ret = -host_to_target_errno(errno);
6544 break;
6545 }
6546 #ifdef TARGET_ALPHA
6547 /* Return value is the unbiased priority. Signal no error. */
6548 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6549 #else
6550 /* Return value is a biased priority to avoid negative numbers. */
6551 ret = 20 - ret;
6552 #endif
6553 break;
6554 case TARGET_NR_setpriority:
6555 ret = get_errno(setpriority(arg1, arg2, arg3));
6556 break;
6557 #ifdef TARGET_NR_profil
6558 case TARGET_NR_profil:
6559 goto unimplemented;
6560 #endif
6561 case TARGET_NR_statfs:
6562 if (!(p = lock_user_string(arg1)))
6563 goto efault;
6564 ret = get_errno(statfs(path(p), &stfs));
6565 unlock_user(p, arg1, 0);
6566 convert_statfs:
6567 if (!is_error(ret)) {
6568 struct target_statfs *target_stfs;
6569
6570 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6571 goto efault;
6572 __put_user(stfs.f_type, &target_stfs->f_type);
6573 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6574 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6575 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6576 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6577 __put_user(stfs.f_files, &target_stfs->f_files);
6578 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6579 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6580 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6581 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6582 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6583 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6584 unlock_user_struct(target_stfs, arg2, 1);
6585 }
6586 break;
6587 case TARGET_NR_fstatfs:
6588 ret = get_errno(fstatfs(arg1, &stfs));
6589 goto convert_statfs;
6590 #ifdef TARGET_NR_statfs64
6591 case TARGET_NR_statfs64:
6592 if (!(p = lock_user_string(arg1)))
6593 goto efault;
6594 ret = get_errno(statfs(path(p), &stfs));
6595 unlock_user(p, arg1, 0);
6596 convert_statfs64:
6597 if (!is_error(ret)) {
6598 struct target_statfs64 *target_stfs;
6599
6600 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6601 goto efault;
6602 __put_user(stfs.f_type, &target_stfs->f_type);
6603 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6604 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6605 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6606 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6607 __put_user(stfs.f_files, &target_stfs->f_files);
6608 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6609 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6610 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6611 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6612 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6613 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6614 unlock_user_struct(target_stfs, arg3, 1);
6615 }
6616 break;
6617 case TARGET_NR_fstatfs64:
6618 ret = get_errno(fstatfs(arg1, &stfs));
6619 goto convert_statfs64;
6620 #endif
6621 #ifdef TARGET_NR_ioperm
6622 case TARGET_NR_ioperm:
6623 goto unimplemented;
6624 #endif
6625 #ifdef TARGET_NR_socketcall
6626 case TARGET_NR_socketcall:
6627 ret = do_socketcall(arg1, arg2);
6628 break;
6629 #endif
6630 #ifdef TARGET_NR_accept
6631 case TARGET_NR_accept:
6632 ret = do_accept(arg1, arg2, arg3);
6633 break;
6634 #endif
6635 #ifdef TARGET_NR_bind
6636 case TARGET_NR_bind:
6637 ret = do_bind(arg1, arg2, arg3);
6638 break;
6639 #endif
6640 #ifdef TARGET_NR_connect
6641 case TARGET_NR_connect:
6642 ret = do_connect(arg1, arg2, arg3);
6643 break;
6644 #endif
6645 #ifdef TARGET_NR_getpeername
6646 case TARGET_NR_getpeername:
6647 ret = do_getpeername(arg1, arg2, arg3);
6648 break;
6649 #endif
6650 #ifdef TARGET_NR_getsockname
6651 case TARGET_NR_getsockname:
6652 ret = do_getsockname(arg1, arg2, arg3);
6653 break;
6654 #endif
6655 #ifdef TARGET_NR_getsockopt
6656 case TARGET_NR_getsockopt:
6657 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6658 break;
6659 #endif
6660 #ifdef TARGET_NR_listen
6661 case TARGET_NR_listen:
6662 ret = get_errno(listen(arg1, arg2));
6663 break;
6664 #endif
6665 #ifdef TARGET_NR_recv
6666 case TARGET_NR_recv:
6667 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6668 break;
6669 #endif
6670 #ifdef TARGET_NR_recvfrom
6671 case TARGET_NR_recvfrom:
6672 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6673 break;
6674 #endif
6675 #ifdef TARGET_NR_recvmsg
6676 case TARGET_NR_recvmsg:
6677 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6678 break;
6679 #endif
6680 #ifdef TARGET_NR_send
6681 case TARGET_NR_send:
6682 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6683 break;
6684 #endif
6685 #ifdef TARGET_NR_sendmsg
6686 case TARGET_NR_sendmsg:
6687 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6688 break;
6689 #endif
6690 #ifdef TARGET_NR_sendto
6691 case TARGET_NR_sendto:
6692 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6693 break;
6694 #endif
6695 #ifdef TARGET_NR_shutdown
6696 case TARGET_NR_shutdown:
6697 ret = get_errno(shutdown(arg1, arg2));
6698 break;
6699 #endif
6700 #ifdef TARGET_NR_socket
6701 case TARGET_NR_socket:
6702 ret = do_socket(arg1, arg2, arg3);
6703 break;
6704 #endif
6705 #ifdef TARGET_NR_socketpair
6706 case TARGET_NR_socketpair:
6707 ret = do_socketpair(arg1, arg2, arg3, arg4);
6708 break;
6709 #endif
6710 #ifdef TARGET_NR_setsockopt
6711 case TARGET_NR_setsockopt:
6712 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6713 break;
6714 #endif
6715
6716 case TARGET_NR_syslog:
6717 if (!(p = lock_user_string(arg2)))
6718 goto efault;
6719 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6720 unlock_user(p, arg2, 0);
6721 break;
6722
6723 case TARGET_NR_setitimer:
6724 {
6725 struct itimerval value, ovalue, *pvalue;
6726
6727 if (arg2) {
6728 pvalue = &value;
6729 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6730 || copy_from_user_timeval(&pvalue->it_value,
6731 arg2 + sizeof(struct target_timeval)))
6732 goto efault;
6733 } else {
6734 pvalue = NULL;
6735 }
6736 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6737 if (!is_error(ret) && arg3) {
6738 if (copy_to_user_timeval(arg3,
6739 &ovalue.it_interval)
6740 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6741 &ovalue.it_value))
6742 goto efault;
6743 }
6744 }
6745 break;
6746 case TARGET_NR_getitimer:
6747 {
6748 struct itimerval value;
6749
6750 ret = get_errno(getitimer(arg1, &value));
6751 if (!is_error(ret) && arg2) {
6752 if (copy_to_user_timeval(arg2,
6753 &value.it_interval)
6754 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6755 &value.it_value))
6756 goto efault;
6757 }
6758 }
6759 break;
6760 case TARGET_NR_stat:
6761 if (!(p = lock_user_string(arg1)))
6762 goto efault;
6763 ret = get_errno(stat(path(p), &st));
6764 unlock_user(p, arg1, 0);
6765 goto do_stat;
6766 case TARGET_NR_lstat:
6767 if (!(p = lock_user_string(arg1)))
6768 goto efault;
6769 ret = get_errno(lstat(path(p), &st));
6770 unlock_user(p, arg1, 0);
6771 goto do_stat;
6772 case TARGET_NR_fstat:
6773 {
6774 ret = get_errno(fstat(arg1, &st));
6775 do_stat:
6776 if (!is_error(ret)) {
6777 struct target_stat *target_st;
6778
6779 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6780 goto efault;
6781 memset(target_st, 0, sizeof(*target_st));
6782 __put_user(st.st_dev, &target_st->st_dev);
6783 __put_user(st.st_ino, &target_st->st_ino);
6784 __put_user(st.st_mode, &target_st->st_mode);
6785 __put_user(st.st_uid, &target_st->st_uid);
6786 __put_user(st.st_gid, &target_st->st_gid);
6787 __put_user(st.st_nlink, &target_st->st_nlink);
6788 __put_user(st.st_rdev, &target_st->st_rdev);
6789 __put_user(st.st_size, &target_st->st_size);
6790 __put_user(st.st_blksize, &target_st->st_blksize);
6791 __put_user(st.st_blocks, &target_st->st_blocks);
6792 __put_user(st.st_atime, &target_st->target_st_atime);
6793 __put_user(st.st_mtime, &target_st->target_st_mtime);
6794 __put_user(st.st_ctime, &target_st->target_st_ctime);
6795 unlock_user_struct(target_st, arg2, 1);
6796 }
6797 }
6798 break;
6799 #ifdef TARGET_NR_olduname
6800 case TARGET_NR_olduname:
6801 goto unimplemented;
6802 #endif
6803 #ifdef TARGET_NR_iopl
6804 case TARGET_NR_iopl:
6805 goto unimplemented;
6806 #endif
6807 case TARGET_NR_vhangup:
6808 ret = get_errno(vhangup());
6809 break;
6810 #ifdef TARGET_NR_idle
6811 case TARGET_NR_idle:
6812 goto unimplemented;
6813 #endif
6814 #ifdef TARGET_NR_syscall
6815 case TARGET_NR_syscall:
6816 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6817 arg6, arg7, arg8, 0);
6818 break;
6819 #endif
6820 case TARGET_NR_wait4:
6821 {
6822 int status;
6823 abi_long status_ptr = arg2;
6824 struct rusage rusage, *rusage_ptr;
6825 abi_ulong target_rusage = arg4;
6826 if (target_rusage)
6827 rusage_ptr = &rusage;
6828 else
6829 rusage_ptr = NULL;
6830 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6831 if (!is_error(ret)) {
6832 if (status_ptr && ret) {
6833 status = host_to_target_waitstatus(status);
6834 if (put_user_s32(status, status_ptr))
6835 goto efault;
6836 }
6837 if (target_rusage)
6838 host_to_target_rusage(target_rusage, &rusage);
6839 }
6840 }
6841 break;
6842 #ifdef TARGET_NR_swapoff
6843 case TARGET_NR_swapoff:
6844 if (!(p = lock_user_string(arg1)))
6845 goto efault;
6846 ret = get_errno(swapoff(p));
6847 unlock_user(p, arg1, 0);
6848 break;
6849 #endif
6850 case TARGET_NR_sysinfo:
6851 {
6852 struct target_sysinfo *target_value;
6853 struct sysinfo value;
6854 ret = get_errno(sysinfo(&value));
6855 if (!is_error(ret) && arg1)
6856 {
6857 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6858 goto efault;
6859 __put_user(value.uptime, &target_value->uptime);
6860 __put_user(value.loads[0], &target_value->loads[0]);
6861 __put_user(value.loads[1], &target_value->loads[1]);
6862 __put_user(value.loads[2], &target_value->loads[2]);
6863 __put_user(value.totalram, &target_value->totalram);
6864 __put_user(value.freeram, &target_value->freeram);
6865 __put_user(value.sharedram, &target_value->sharedram);
6866 __put_user(value.bufferram, &target_value->bufferram);
6867 __put_user(value.totalswap, &target_value->totalswap);
6868 __put_user(value.freeswap, &target_value->freeswap);
6869 __put_user(value.procs, &target_value->procs);
6870 __put_user(value.totalhigh, &target_value->totalhigh);
6871 __put_user(value.freehigh, &target_value->freehigh);
6872 __put_user(value.mem_unit, &target_value->mem_unit);
6873 unlock_user_struct(target_value, arg1, 1);
6874 }
6875 }
6876 break;
6877 #ifdef TARGET_NR_ipc
6878 case TARGET_NR_ipc:
6879 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6880 break;
6881 #endif
6882 #ifdef TARGET_NR_semget
6883 case TARGET_NR_semget:
6884 ret = get_errno(semget(arg1, arg2, arg3));
6885 break;
6886 #endif
6887 #ifdef TARGET_NR_semop
6888 case TARGET_NR_semop:
6889 ret = get_errno(do_semop(arg1, arg2, arg3));
6890 break;
6891 #endif
6892 #ifdef TARGET_NR_semctl
6893 case TARGET_NR_semctl:
6894 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6895 break;
6896 #endif
6897 #ifdef TARGET_NR_msgctl
6898 case TARGET_NR_msgctl:
6899 ret = do_msgctl(arg1, arg2, arg3);
6900 break;
6901 #endif
6902 #ifdef TARGET_NR_msgget
6903 case TARGET_NR_msgget:
6904 ret = get_errno(msgget(arg1, arg2));
6905 break;
6906 #endif
6907 #ifdef TARGET_NR_msgrcv
6908 case TARGET_NR_msgrcv:
6909 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6910 break;
6911 #endif
6912 #ifdef TARGET_NR_msgsnd
6913 case TARGET_NR_msgsnd:
6914 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6915 break;
6916 #endif
6917 #ifdef TARGET_NR_shmget
6918 case TARGET_NR_shmget:
6919 ret = get_errno(shmget(arg1, arg2, arg3));
6920 break;
6921 #endif
6922 #ifdef TARGET_NR_shmctl
6923 case TARGET_NR_shmctl:
6924 ret = do_shmctl(arg1, arg2, arg3);
6925 break;
6926 #endif
6927 #ifdef TARGET_NR_shmat
6928 case TARGET_NR_shmat:
6929 ret = do_shmat(arg1, arg2, arg3);
6930 break;
6931 #endif
6932 #ifdef TARGET_NR_shmdt
6933 case TARGET_NR_shmdt:
6934 ret = do_shmdt(arg1);
6935 break;
6936 #endif
6937 case TARGET_NR_fsync:
6938 ret = get_errno(fsync(arg1));
6939 break;
6940 case TARGET_NR_clone:
6941 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6942 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6943 #elif defined(TARGET_CRIS)
6944 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6945 #elif defined(TARGET_MICROBLAZE)
6946 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6947 #elif defined(TARGET_S390X)
6948 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6949 #else
6950 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6951 #endif
6952 break;
6953 #ifdef __NR_exit_group
6954 /* new thread calls */
6955 case TARGET_NR_exit_group:
6956 #ifdef TARGET_GPROF
6957 _mcleanup();
6958 #endif
6959 gdb_exit(cpu_env, arg1);
6960 ret = get_errno(exit_group(arg1));
6961 break;
6962 #endif
6963 case TARGET_NR_setdomainname:
6964 if (!(p = lock_user_string(arg1)))
6965 goto efault;
6966 ret = get_errno(setdomainname(p, arg2));
6967 unlock_user(p, arg1, 0);
6968 break;
6969 case TARGET_NR_uname:
6970 /* no need to transcode because we use the linux syscall */
6971 {
6972 struct new_utsname * buf;
6973
6974 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6975 goto efault;
6976 ret = get_errno(sys_uname(buf));
6977 if (!is_error(ret)) {
6978 /* Overrite the native machine name with whatever is being
6979 emulated. */
6980 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6981 /* Allow the user to override the reported release. */
6982 if (qemu_uname_release && *qemu_uname_release)
6983 strcpy (buf->release, qemu_uname_release);
6984 }
6985 unlock_user_struct(buf, arg1, 1);
6986 }
6987 break;
6988 #ifdef TARGET_I386
6989 case TARGET_NR_modify_ldt:
6990 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6991 break;
6992 #if !defined(TARGET_X86_64)
6993 case TARGET_NR_vm86old:
6994 goto unimplemented;
6995 case TARGET_NR_vm86:
6996 ret = do_vm86(cpu_env, arg1, arg2);
6997 break;
6998 #endif
6999 #endif
7000 case TARGET_NR_adjtimex:
7001 goto unimplemented;
7002 #ifdef TARGET_NR_create_module
7003 case TARGET_NR_create_module:
7004 #endif
7005 case TARGET_NR_init_module:
7006 case TARGET_NR_delete_module:
7007 #ifdef TARGET_NR_get_kernel_syms
7008 case TARGET_NR_get_kernel_syms:
7009 #endif
7010 goto unimplemented;
7011 case TARGET_NR_quotactl:
7012 goto unimplemented;
7013 case TARGET_NR_getpgid:
7014 ret = get_errno(getpgid(arg1));
7015 break;
7016 case TARGET_NR_fchdir:
7017 ret = get_errno(fchdir(arg1));
7018 break;
7019 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7020 case TARGET_NR_bdflush:
7021 goto unimplemented;
7022 #endif
7023 #ifdef TARGET_NR_sysfs
7024 case TARGET_NR_sysfs:
7025 goto unimplemented;
7026 #endif
7027 case TARGET_NR_personality:
7028 ret = get_errno(personality(arg1));
7029 break;
7030 #ifdef TARGET_NR_afs_syscall
7031 case TARGET_NR_afs_syscall:
7032 goto unimplemented;
7033 #endif
7034 #ifdef TARGET_NR__llseek /* Not on alpha */
7035 case TARGET_NR__llseek:
7036 {
7037 int64_t res;
7038 #if !defined(__NR_llseek)
7039 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7040 if (res == -1) {
7041 ret = get_errno(res);
7042 } else {
7043 ret = 0;
7044 }
7045 #else
7046 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7047 #endif
7048 if ((ret == 0) && put_user_s64(res, arg4)) {
7049 goto efault;
7050 }
7051 }
7052 break;
7053 #endif
7054 case TARGET_NR_getdents:
7055 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7056 {
7057 struct target_dirent *target_dirp;
7058 struct linux_dirent *dirp;
7059 abi_long count = arg3;
7060
7061 dirp = malloc(count);
7062 if (!dirp) {
7063 ret = -TARGET_ENOMEM;
7064 goto fail;
7065 }
7066
7067 ret = get_errno(sys_getdents(arg1, dirp, count));
7068 if (!is_error(ret)) {
7069 struct linux_dirent *de;
7070 struct target_dirent *tde;
7071 int len = ret;
7072 int reclen, treclen;
7073 int count1, tnamelen;
7074
7075 count1 = 0;
7076 de = dirp;
7077 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7078 goto efault;
7079 tde = target_dirp;
7080 while (len > 0) {
7081 reclen = de->d_reclen;
7082 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7083 assert(tnamelen >= 0);
7084 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7085 assert(count1 + treclen <= count);
7086 tde->d_reclen = tswap16(treclen);
7087 tde->d_ino = tswapal(de->d_ino);
7088 tde->d_off = tswapal(de->d_off);
7089 memcpy(tde->d_name, de->d_name, tnamelen);
7090 de = (struct linux_dirent *)((char *)de + reclen);
7091 len -= reclen;
7092 tde = (struct target_dirent *)((char *)tde + treclen);
7093 count1 += treclen;
7094 }
7095 ret = count1;
7096 unlock_user(target_dirp, arg2, ret);
7097 }
7098 free(dirp);
7099 }
7100 #else
7101 {
7102 struct linux_dirent *dirp;
7103 abi_long count = arg3;
7104
7105 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7106 goto efault;
7107 ret = get_errno(sys_getdents(arg1, dirp, count));
7108 if (!is_error(ret)) {
7109 struct linux_dirent *de;
7110 int len = ret;
7111 int reclen;
7112 de = dirp;
7113 while (len > 0) {
7114 reclen = de->d_reclen;
7115 if (reclen > len)
7116 break;
7117 de->d_reclen = tswap16(reclen);
7118 tswapls(&de->d_ino);
7119 tswapls(&de->d_off);
7120 de = (struct linux_dirent *)((char *)de + reclen);
7121 len -= reclen;
7122 }
7123 }
7124 unlock_user(dirp, arg2, ret);
7125 }
7126 #endif
7127 break;
7128 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7129 case TARGET_NR_getdents64:
7130 {
7131 struct linux_dirent64 *dirp;
7132 abi_long count = arg3;
7133 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7134 goto efault;
7135 ret = get_errno(sys_getdents64(arg1, dirp, count));
7136 if (!is_error(ret)) {
7137 struct linux_dirent64 *de;
7138 int len = ret;
7139 int reclen;
7140 de = dirp;
7141 while (len > 0) {
7142 reclen = de->d_reclen;
7143 if (reclen > len)
7144 break;
7145 de->d_reclen = tswap16(reclen);
7146 tswap64s((uint64_t *)&de->d_ino);
7147 tswap64s((uint64_t *)&de->d_off);
7148 de = (struct linux_dirent64 *)((char *)de + reclen);
7149 len -= reclen;
7150 }
7151 }
7152 unlock_user(dirp, arg2, ret);
7153 }
7154 break;
7155 #endif /* TARGET_NR_getdents64 */
7156 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
7157 #ifdef TARGET_S390X
7158 case TARGET_NR_select:
7159 #else
7160 case TARGET_NR__newselect:
7161 #endif
7162 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7163 break;
7164 #endif
7165 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7166 # ifdef TARGET_NR_poll
7167 case TARGET_NR_poll:
7168 # endif
7169 # ifdef TARGET_NR_ppoll
7170 case TARGET_NR_ppoll:
7171 # endif
7172 {
7173 struct target_pollfd *target_pfd;
7174 unsigned int nfds = arg2;
7175 int timeout = arg3;
7176 struct pollfd *pfd;
7177 unsigned int i;
7178
7179 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7180 if (!target_pfd)
7181 goto efault;
7182
7183 pfd = alloca(sizeof(struct pollfd) * nfds);
7184 for(i = 0; i < nfds; i++) {
7185 pfd[i].fd = tswap32(target_pfd[i].fd);
7186 pfd[i].events = tswap16(target_pfd[i].events);
7187 }
7188
7189 # ifdef TARGET_NR_ppoll
7190 if (num == TARGET_NR_ppoll) {
7191 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7192 target_sigset_t *target_set;
7193 sigset_t _set, *set = &_set;
7194
7195 if (arg3) {
7196 if (target_to_host_timespec(timeout_ts, arg3)) {
7197 unlock_user(target_pfd, arg1, 0);
7198 goto efault;
7199 }
7200 } else {
7201 timeout_ts = NULL;
7202 }
7203
7204 if (arg4) {
7205 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7206 if (!target_set) {
7207 unlock_user(target_pfd, arg1, 0);
7208 goto efault;
7209 }
7210 target_to_host_sigset(set, target_set);
7211 } else {
7212 set = NULL;
7213 }
7214
7215 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7216
7217 if (!is_error(ret) && arg3) {
7218 host_to_target_timespec(arg3, timeout_ts);
7219 }
7220 if (arg4) {
7221 unlock_user(target_set, arg4, 0);
7222 }
7223 } else
7224 # endif
7225 ret = get_errno(poll(pfd, nfds, timeout));
7226
7227 if (!is_error(ret)) {
7228 for(i = 0; i < nfds; i++) {
7229 target_pfd[i].revents = tswap16(pfd[i].revents);
7230 }
7231 }
7232 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7233 }
7234 break;
7235 #endif
7236 case TARGET_NR_flock:
7237 /* NOTE: the flock constant seems to be the same for every
7238 Linux platform */
7239 ret = get_errno(flock(arg1, arg2));
7240 break;
7241 case TARGET_NR_readv:
7242 {
7243 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7244 if (vec != NULL) {
7245 ret = get_errno(readv(arg1, vec, arg3));
7246 unlock_iovec(vec, arg2, arg3, 1);
7247 } else {
7248 ret = -host_to_target_errno(errno);
7249 }
7250 }
7251 break;
7252 case TARGET_NR_writev:
7253 {
7254 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7255 if (vec != NULL) {
7256 ret = get_errno(writev(arg1, vec, arg3));
7257 unlock_iovec(vec, arg2, arg3, 0);
7258 } else {
7259 ret = -host_to_target_errno(errno);
7260 }
7261 }
7262 break;
7263 case TARGET_NR_getsid:
7264 ret = get_errno(getsid(arg1));
7265 break;
7266 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7267 case TARGET_NR_fdatasync:
7268 ret = get_errno(fdatasync(arg1));
7269 break;
7270 #endif
7271 case TARGET_NR__sysctl:
7272 /* We don't implement this, but ENOTDIR is always a safe
7273 return value. */
7274 ret = -TARGET_ENOTDIR;
7275 break;
7276 case TARGET_NR_sched_getaffinity:
7277 {
7278 unsigned int mask_size;
7279 unsigned long *mask;
7280
7281 /*
7282 * sched_getaffinity needs multiples of ulong, so need to take
7283 * care of mismatches between target ulong and host ulong sizes.
7284 */
7285 if (arg2 & (sizeof(abi_ulong) - 1)) {
7286 ret = -TARGET_EINVAL;
7287 break;
7288 }
7289 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7290
7291 mask = alloca(mask_size);
7292 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7293
7294 if (!is_error(ret)) {
7295 if (copy_to_user(arg3, mask, ret)) {
7296 goto efault;
7297 }
7298 }
7299 }
7300 break;
7301 case TARGET_NR_sched_setaffinity:
7302 {
7303 unsigned int mask_size;
7304 unsigned long *mask;
7305
7306 /*
7307 * sched_setaffinity needs multiples of ulong, so need to take
7308 * care of mismatches between target ulong and host ulong sizes.
7309 */
7310 if (arg2 & (sizeof(abi_ulong) - 1)) {
7311 ret = -TARGET_EINVAL;
7312 break;
7313 }
7314 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7315
7316 mask = alloca(mask_size);
7317 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7318 goto efault;
7319 }
7320 memcpy(mask, p, arg2);
7321 unlock_user_struct(p, arg2, 0);
7322
7323 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7324 }
7325 break;
7326 case TARGET_NR_sched_setparam:
7327 {
7328 struct sched_param *target_schp;
7329 struct sched_param schp;
7330
7331 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7332 goto efault;
7333 schp.sched_priority = tswap32(target_schp->sched_priority);
7334 unlock_user_struct(target_schp, arg2, 0);
7335 ret = get_errno(sched_setparam(arg1, &schp));
7336 }
7337 break;
7338 case TARGET_NR_sched_getparam:
7339 {
7340 struct sched_param *target_schp;
7341 struct sched_param schp;
7342 ret = get_errno(sched_getparam(arg1, &schp));
7343 if (!is_error(ret)) {
7344 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7345 goto efault;
7346 target_schp->sched_priority = tswap32(schp.sched_priority);
7347 unlock_user_struct(target_schp, arg2, 1);
7348 }
7349 }
7350 break;
7351 case TARGET_NR_sched_setscheduler:
7352 {
7353 struct sched_param *target_schp;
7354 struct sched_param schp;
7355 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7356 goto efault;
7357 schp.sched_priority = tswap32(target_schp->sched_priority);
7358 unlock_user_struct(target_schp, arg3, 0);
7359 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7360 }
7361 break;
7362 case TARGET_NR_sched_getscheduler:
7363 ret = get_errno(sched_getscheduler(arg1));
7364 break;
7365 case TARGET_NR_sched_yield:
7366 ret = get_errno(sched_yield());
7367 break;
7368 case TARGET_NR_sched_get_priority_max:
7369 ret = get_errno(sched_get_priority_max(arg1));
7370 break;
7371 case TARGET_NR_sched_get_priority_min:
7372 ret = get_errno(sched_get_priority_min(arg1));
7373 break;
7374 case TARGET_NR_sched_rr_get_interval:
7375 {
7376 struct timespec ts;
7377 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7378 if (!is_error(ret)) {
7379 host_to_target_timespec(arg2, &ts);
7380 }
7381 }
7382 break;
7383 case TARGET_NR_nanosleep:
7384 {
7385 struct timespec req, rem;
7386 target_to_host_timespec(&req, arg1);
7387 ret = get_errno(nanosleep(&req, &rem));
7388 if (is_error(ret) && arg2) {
7389 host_to_target_timespec(arg2, &rem);
7390 }
7391 }
7392 break;
7393 #ifdef TARGET_NR_query_module
7394 case TARGET_NR_query_module:
7395 goto unimplemented;
7396 #endif
7397 #ifdef TARGET_NR_nfsservctl
7398 case TARGET_NR_nfsservctl:
7399 goto unimplemented;
7400 #endif
7401 case TARGET_NR_prctl:
7402 switch (arg1) {
7403 case PR_GET_PDEATHSIG:
7404 {
7405 int deathsig;
7406 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7407 if (!is_error(ret) && arg2
7408 && put_user_ual(deathsig, arg2)) {
7409 goto efault;
7410 }
7411 break;
7412 }
7413 #ifdef PR_GET_NAME
7414 case PR_GET_NAME:
7415 {
7416 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7417 if (!name) {
7418 goto efault;
7419 }
7420 ret = get_errno(prctl(arg1, (unsigned long)name,
7421 arg3, arg4, arg5));
7422 unlock_user(name, arg2, 16);
7423 break;
7424 }
7425 case PR_SET_NAME:
7426 {
7427 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7428 if (!name) {
7429 goto efault;
7430 }
7431 ret = get_errno(prctl(arg1, (unsigned long)name,
7432 arg3, arg4, arg5));
7433 unlock_user(name, arg2, 0);
7434 break;
7435 }
7436 #endif
7437 default:
7438 /* Most prctl options have no pointer arguments */
7439 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7440 break;
7441 }
7442 break;
7443 #ifdef TARGET_NR_arch_prctl
7444 case TARGET_NR_arch_prctl:
7445 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7446 ret = do_arch_prctl(cpu_env, arg1, arg2);
7447 break;
7448 #else
7449 goto unimplemented;
7450 #endif
7451 #endif
7452 #ifdef TARGET_NR_pread
7453 case TARGET_NR_pread:
7454 if (regpairs_aligned(cpu_env))
7455 arg4 = arg5;
7456 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7457 goto efault;
7458 ret = get_errno(pread(arg1, p, arg3, arg4));
7459 unlock_user(p, arg2, ret);
7460 break;
7461 case TARGET_NR_pwrite:
7462 if (regpairs_aligned(cpu_env))
7463 arg4 = arg5;
7464 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7465 goto efault;
7466 ret = get_errno(pwrite(arg1, p, arg3, arg4));
7467 unlock_user(p, arg2, 0);
7468 break;
7469 #endif
7470 #ifdef TARGET_NR_pread64
7471 case TARGET_NR_pread64:
7472 if (regpairs_aligned(cpu_env)) {
7473 arg4 = arg5;
7474 arg5 = arg6;
7475 }
7476 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7477 goto efault;
7478 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7479 unlock_user(p, arg2, ret);
7480 break;
7481 case TARGET_NR_pwrite64:
7482 if (regpairs_aligned(cpu_env)) {
7483 arg4 = arg5;
7484 arg5 = arg6;
7485 }
7486 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7487 goto efault;
7488 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7489 unlock_user(p, arg2, 0);
7490 break;
7491 #endif
7492 case TARGET_NR_getcwd:
7493 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7494 goto efault;
7495 ret = get_errno(sys_getcwd1(p, arg2));
7496 unlock_user(p, arg1, ret);
7497 break;
7498 case TARGET_NR_capget:
7499 goto unimplemented;
7500 case TARGET_NR_capset:
7501 goto unimplemented;
7502 case TARGET_NR_sigaltstack:
7503 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7504 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7505 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7506 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7507 break;
7508 #else
7509 goto unimplemented;
7510 #endif
7511 case TARGET_NR_sendfile:
7512 goto unimplemented;
7513 #ifdef TARGET_NR_getpmsg
7514 case TARGET_NR_getpmsg:
7515 goto unimplemented;
7516 #endif
7517 #ifdef TARGET_NR_putpmsg
7518 case TARGET_NR_putpmsg:
7519 goto unimplemented;
7520 #endif
7521 #ifdef TARGET_NR_vfork
7522 case TARGET_NR_vfork:
7523 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7524 0, 0, 0, 0));
7525 break;
7526 #endif
7527 #ifdef TARGET_NR_ugetrlimit
7528 case TARGET_NR_ugetrlimit:
7529 {
7530 struct rlimit rlim;
7531 int resource = target_to_host_resource(arg1);
7532 ret = get_errno(getrlimit(resource, &rlim));
7533 if (!is_error(ret)) {
7534 struct target_rlimit *target_rlim;
7535 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7536 goto efault;
7537 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7538 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7539 unlock_user_struct(target_rlim, arg2, 1);
7540 }
7541 break;
7542 }
7543 #endif
7544 #ifdef TARGET_NR_truncate64
7545 case TARGET_NR_truncate64:
7546 if (!(p = lock_user_string(arg1)))
7547 goto efault;
7548 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7549 unlock_user(p, arg1, 0);
7550 break;
7551 #endif
7552 #ifdef TARGET_NR_ftruncate64
7553 case TARGET_NR_ftruncate64:
7554 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7555 break;
7556 #endif
7557 #ifdef TARGET_NR_stat64
7558 case TARGET_NR_stat64:
7559 if (!(p = lock_user_string(arg1)))
7560 goto efault;
7561 ret = get_errno(stat(path(p), &st));
7562 unlock_user(p, arg1, 0);
7563 if (!is_error(ret))
7564 ret = host_to_target_stat64(cpu_env, arg2, &st);
7565 break;
7566 #endif
7567 #ifdef TARGET_NR_lstat64
7568 case TARGET_NR_lstat64:
7569 if (!(p = lock_user_string(arg1)))
7570 goto efault;
7571 ret = get_errno(lstat(path(p), &st));
7572 unlock_user(p, arg1, 0);
7573 if (!is_error(ret))
7574 ret = host_to_target_stat64(cpu_env, arg2, &st);
7575 break;
7576 #endif
7577 #ifdef TARGET_NR_fstat64
7578 case TARGET_NR_fstat64:
7579 ret = get_errno(fstat(arg1, &st));
7580 if (!is_error(ret))
7581 ret = host_to_target_stat64(cpu_env, arg2, &st);
7582 break;
7583 #endif
7584 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7585 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7586 #ifdef TARGET_NR_fstatat64
7587 case TARGET_NR_fstatat64:
7588 #endif
7589 #ifdef TARGET_NR_newfstatat
7590 case TARGET_NR_newfstatat:
7591 #endif
7592 if (!(p = lock_user_string(arg2)))
7593 goto efault;
7594 #ifdef __NR_fstatat64
7595 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7596 #else
7597 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7598 #endif
7599 if (!is_error(ret))
7600 ret = host_to_target_stat64(cpu_env, arg3, &st);
7601 break;
7602 #endif
7603 case TARGET_NR_lchown:
7604 if (!(p = lock_user_string(arg1)))
7605 goto efault;
7606 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7607 unlock_user(p, arg1, 0);
7608 break;
7609 #ifdef TARGET_NR_getuid
7610 case TARGET_NR_getuid:
7611 ret = get_errno(high2lowuid(getuid()));
7612 break;
7613 #endif
7614 #ifdef TARGET_NR_getgid
7615 case TARGET_NR_getgid:
7616 ret = get_errno(high2lowgid(getgid()));
7617 break;
7618 #endif
7619 #ifdef TARGET_NR_geteuid
7620 case TARGET_NR_geteuid:
7621 ret = get_errno(high2lowuid(geteuid()));
7622 break;
7623 #endif
7624 #ifdef TARGET_NR_getegid
7625 case TARGET_NR_getegid:
7626 ret = get_errno(high2lowgid(getegid()));
7627 break;
7628 #endif
7629 case TARGET_NR_setreuid:
7630 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7631 break;
7632 case TARGET_NR_setregid:
7633 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7634 break;
7635 case TARGET_NR_getgroups:
7636 {
7637 int gidsetsize = arg1;
7638 target_id *target_grouplist;
7639 gid_t *grouplist;
7640 int i;
7641
7642 grouplist = alloca(gidsetsize * sizeof(gid_t));
7643 ret = get_errno(getgroups(gidsetsize, grouplist));
7644 if (gidsetsize == 0)
7645 break;
7646 if (!is_error(ret)) {
7647 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7648 if (!target_grouplist)
7649 goto efault;
7650 for(i = 0;i < ret; i++)
7651 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7652 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7653 }
7654 }
7655 break;
7656 case TARGET_NR_setgroups:
7657 {
7658 int gidsetsize = arg1;
7659 target_id *target_grouplist;
7660 gid_t *grouplist;
7661 int i;
7662
7663 grouplist = alloca(gidsetsize * sizeof(gid_t));
7664 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7665 if (!target_grouplist) {
7666 ret = -TARGET_EFAULT;
7667 goto fail;
7668 }
7669 for(i = 0;i < gidsetsize; i++)
7670 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7671 unlock_user(target_grouplist, arg2, 0);
7672 ret = get_errno(setgroups(gidsetsize, grouplist));
7673 }
7674 break;
7675 case TARGET_NR_fchown:
7676 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7677 break;
7678 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7679 case TARGET_NR_fchownat:
7680 if (!(p = lock_user_string(arg2)))
7681 goto efault;
7682 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7683 unlock_user(p, arg2, 0);
7684 break;
7685 #endif
7686 #ifdef TARGET_NR_setresuid
7687 case TARGET_NR_setresuid:
7688 ret = get_errno(setresuid(low2highuid(arg1),
7689 low2highuid(arg2),
7690 low2highuid(arg3)));
7691 break;
7692 #endif
7693 #ifdef TARGET_NR_getresuid
7694 case TARGET_NR_getresuid:
7695 {
7696 uid_t ruid, euid, suid;
7697 ret = get_errno(getresuid(&ruid, &euid, &suid));
7698 if (!is_error(ret)) {
7699 if (put_user_u16(high2lowuid(ruid), arg1)
7700 || put_user_u16(high2lowuid(euid), arg2)
7701 || put_user_u16(high2lowuid(suid), arg3))
7702 goto efault;
7703 }
7704 }
7705 break;
7706 #endif
7707 #ifdef TARGET_NR_getresgid
7708 case TARGET_NR_setresgid:
7709 ret = get_errno(setresgid(low2highgid(arg1),
7710 low2highgid(arg2),
7711 low2highgid(arg3)));
7712 break;
7713 #endif
7714 #ifdef TARGET_NR_getresgid
7715 case TARGET_NR_getresgid:
7716 {
7717 gid_t rgid, egid, sgid;
7718 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7719 if (!is_error(ret)) {
7720 if (put_user_u16(high2lowgid(rgid), arg1)
7721 || put_user_u16(high2lowgid(egid), arg2)
7722 || put_user_u16(high2lowgid(sgid), arg3))
7723 goto efault;
7724 }
7725 }
7726 break;
7727 #endif
7728 case TARGET_NR_chown:
7729 if (!(p = lock_user_string(arg1)))
7730 goto efault;
7731 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7732 unlock_user(p, arg1, 0);
7733 break;
7734 case TARGET_NR_setuid:
7735 ret = get_errno(setuid(low2highuid(arg1)));
7736 break;
7737 case TARGET_NR_setgid:
7738 ret = get_errno(setgid(low2highgid(arg1)));
7739 break;
7740 case TARGET_NR_setfsuid:
7741 ret = get_errno(setfsuid(arg1));
7742 break;
7743 case TARGET_NR_setfsgid:
7744 ret = get_errno(setfsgid(arg1));
7745 break;
7746
7747 #ifdef TARGET_NR_lchown32
7748 case TARGET_NR_lchown32:
7749 if (!(p = lock_user_string(arg1)))
7750 goto efault;
7751 ret = get_errno(lchown(p, arg2, arg3));
7752 unlock_user(p, arg1, 0);
7753 break;
7754 #endif
7755 #ifdef TARGET_NR_getuid32
7756 case TARGET_NR_getuid32:
7757 ret = get_errno(getuid());
7758 break;
7759 #endif
7760
7761 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7762 /* Alpha specific */
7763 case TARGET_NR_getxuid:
7764 {
7765 uid_t euid;
7766 euid=geteuid();
7767 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7768 }
7769 ret = get_errno(getuid());
7770 break;
7771 #endif
7772 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7773 /* Alpha specific */
7774 case TARGET_NR_getxgid:
7775 {
7776 uid_t egid;
7777 egid=getegid();
7778 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7779 }
7780 ret = get_errno(getgid());
7781 break;
7782 #endif
7783 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7784 /* Alpha specific */
7785 case TARGET_NR_osf_getsysinfo:
7786 ret = -TARGET_EOPNOTSUPP;
7787 switch (arg1) {
7788 case TARGET_GSI_IEEE_FP_CONTROL:
7789 {
7790 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7791
7792 /* Copied from linux ieee_fpcr_to_swcr. */
7793 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7794 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7795 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7796 | SWCR_TRAP_ENABLE_DZE
7797 | SWCR_TRAP_ENABLE_OVF);
7798 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7799 | SWCR_TRAP_ENABLE_INE);
7800 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7801 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7802
7803 if (put_user_u64 (swcr, arg2))
7804 goto efault;
7805 ret = 0;
7806 }
7807 break;
7808
7809 /* case GSI_IEEE_STATE_AT_SIGNAL:
7810 -- Not implemented in linux kernel.
7811 case GSI_UACPROC:
7812 -- Retrieves current unaligned access state; not much used.
7813 case GSI_PROC_TYPE:
7814 -- Retrieves implver information; surely not used.
7815 case GSI_GET_HWRPB:
7816 -- Grabs a copy of the HWRPB; surely not used.
7817 */
7818 }
7819 break;
7820 #endif
7821 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7822 /* Alpha specific */
7823 case TARGET_NR_osf_setsysinfo:
7824 ret = -TARGET_EOPNOTSUPP;
7825 switch (arg1) {
7826 case TARGET_SSI_IEEE_FP_CONTROL:
7827 {
7828 uint64_t swcr, fpcr, orig_fpcr;
7829
7830 if (get_user_u64 (swcr, arg2)) {
7831 goto efault;
7832 }
7833 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7834 fpcr = orig_fpcr & FPCR_DYN_MASK;
7835
7836 /* Copied from linux ieee_swcr_to_fpcr. */
7837 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7838 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7839 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7840 | SWCR_TRAP_ENABLE_DZE
7841 | SWCR_TRAP_ENABLE_OVF)) << 48;
7842 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7843 | SWCR_TRAP_ENABLE_INE)) << 57;
7844 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7845 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7846
7847 cpu_alpha_store_fpcr(cpu_env, fpcr);
7848 ret = 0;
7849 }
7850 break;
7851
7852 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7853 {
7854 uint64_t exc, fpcr, orig_fpcr;
7855 int si_code;
7856
7857 if (get_user_u64(exc, arg2)) {
7858 goto efault;
7859 }
7860
7861 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7862
7863 /* We only add to the exception status here. */
7864 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7865
7866 cpu_alpha_store_fpcr(cpu_env, fpcr);
7867 ret = 0;
7868
7869 /* Old exceptions are not signaled. */
7870 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7871
7872 /* If any exceptions set by this call,
7873 and are unmasked, send a signal. */
7874 si_code = 0;
7875 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7876 si_code = TARGET_FPE_FLTRES;
7877 }
7878 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7879 si_code = TARGET_FPE_FLTUND;
7880 }
7881 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7882 si_code = TARGET_FPE_FLTOVF;
7883 }
7884 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7885 si_code = TARGET_FPE_FLTDIV;
7886 }
7887 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7888 si_code = TARGET_FPE_FLTINV;
7889 }
7890 if (si_code != 0) {
7891 target_siginfo_t info;
7892 info.si_signo = SIGFPE;
7893 info.si_errno = 0;
7894 info.si_code = si_code;
7895 info._sifields._sigfault._addr
7896 = ((CPUArchState *)cpu_env)->pc;
7897 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7898 }
7899 }
7900 break;
7901
7902 /* case SSI_NVPAIRS:
7903 -- Used with SSIN_UACPROC to enable unaligned accesses.
7904 case SSI_IEEE_STATE_AT_SIGNAL:
7905 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7906 -- Not implemented in linux kernel
7907 */
7908 }
7909 break;
7910 #endif
7911 #ifdef TARGET_NR_osf_sigprocmask
7912 /* Alpha specific. */
7913 case TARGET_NR_osf_sigprocmask:
7914 {
7915 abi_ulong mask;
7916 int how;
7917 sigset_t set, oldset;
7918
7919 switch(arg1) {
7920 case TARGET_SIG_BLOCK:
7921 how = SIG_BLOCK;
7922 break;
7923 case TARGET_SIG_UNBLOCK:
7924 how = SIG_UNBLOCK;
7925 break;
7926 case TARGET_SIG_SETMASK:
7927 how = SIG_SETMASK;
7928 break;
7929 default:
7930 ret = -TARGET_EINVAL;
7931 goto fail;
7932 }
7933 mask = arg2;
7934 target_to_host_old_sigset(&set, &mask);
7935 sigprocmask(how, &set, &oldset);
7936 host_to_target_old_sigset(&mask, &oldset);
7937 ret = mask;
7938 }
7939 break;
7940 #endif
7941
7942 #ifdef TARGET_NR_getgid32
7943 case TARGET_NR_getgid32:
7944 ret = get_errno(getgid());
7945 break;
7946 #endif
7947 #ifdef TARGET_NR_geteuid32
7948 case TARGET_NR_geteuid32:
7949 ret = get_errno(geteuid());
7950 break;
7951 #endif
7952 #ifdef TARGET_NR_getegid32
7953 case TARGET_NR_getegid32:
7954 ret = get_errno(getegid());
7955 break;
7956 #endif
7957 #ifdef TARGET_NR_setreuid32
7958 case TARGET_NR_setreuid32:
7959 ret = get_errno(setreuid(arg1, arg2));
7960 break;
7961 #endif
7962 #ifdef TARGET_NR_setregid32
7963 case TARGET_NR_setregid32:
7964 ret = get_errno(setregid(arg1, arg2));
7965 break;
7966 #endif
7967 #ifdef TARGET_NR_getgroups32
7968 case TARGET_NR_getgroups32:
7969 {
7970 int gidsetsize = arg1;
7971 uint32_t *target_grouplist;
7972 gid_t *grouplist;
7973 int i;
7974
7975 grouplist = alloca(gidsetsize * sizeof(gid_t));
7976 ret = get_errno(getgroups(gidsetsize, grouplist));
7977 if (gidsetsize == 0)
7978 break;
7979 if (!is_error(ret)) {
7980 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7981 if (!target_grouplist) {
7982 ret = -TARGET_EFAULT;
7983 goto fail;
7984 }
7985 for(i = 0;i < ret; i++)
7986 target_grouplist[i] = tswap32(grouplist[i]);
7987 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7988 }
7989 }
7990 break;
7991 #endif
7992 #ifdef TARGET_NR_setgroups32
7993 case TARGET_NR_setgroups32:
7994 {
7995 int gidsetsize = arg1;
7996 uint32_t *target_grouplist;
7997 gid_t *grouplist;
7998 int i;
7999
8000 grouplist = alloca(gidsetsize * sizeof(gid_t));
8001 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8002 if (!target_grouplist) {
8003 ret = -TARGET_EFAULT;
8004 goto fail;
8005 }
8006 for(i = 0;i < gidsetsize; i++)
8007 grouplist[i] = tswap32(target_grouplist[i]);
8008 unlock_user(target_grouplist, arg2, 0);
8009 ret = get_errno(setgroups(gidsetsize, grouplist));
8010 }
8011 break;
8012 #endif
8013 #ifdef TARGET_NR_fchown32
8014 case TARGET_NR_fchown32:
8015 ret = get_errno(fchown(arg1, arg2, arg3));
8016 break;
8017 #endif
8018 #ifdef TARGET_NR_setresuid32
8019 case TARGET_NR_setresuid32:
8020 ret = get_errno(setresuid(arg1, arg2, arg3));
8021 break;
8022 #endif
8023 #ifdef TARGET_NR_getresuid32
8024 case TARGET_NR_getresuid32:
8025 {
8026 uid_t ruid, euid, suid;
8027 ret = get_errno(getresuid(&ruid, &euid, &suid));
8028 if (!is_error(ret)) {
8029 if (put_user_u32(ruid, arg1)
8030 || put_user_u32(euid, arg2)
8031 || put_user_u32(suid, arg3))
8032 goto efault;
8033 }
8034 }
8035 break;
8036 #endif
8037 #ifdef TARGET_NR_setresgid32
8038 case TARGET_NR_setresgid32:
8039 ret = get_errno(setresgid(arg1, arg2, arg3));
8040 break;
8041 #endif
8042 #ifdef TARGET_NR_getresgid32
8043 case TARGET_NR_getresgid32:
8044 {
8045 gid_t rgid, egid, sgid;
8046 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8047 if (!is_error(ret)) {
8048 if (put_user_u32(rgid, arg1)
8049 || put_user_u32(egid, arg2)
8050 || put_user_u32(sgid, arg3))
8051 goto efault;
8052 }
8053 }
8054 break;
8055 #endif
8056 #ifdef TARGET_NR_chown32
8057 case TARGET_NR_chown32:
8058 if (!(p = lock_user_string(arg1)))
8059 goto efault;
8060 ret = get_errno(chown(p, arg2, arg3));
8061 unlock_user(p, arg1, 0);
8062 break;
8063 #endif
8064 #ifdef TARGET_NR_setuid32
8065 case TARGET_NR_setuid32:
8066 ret = get_errno(setuid(arg1));
8067 break;
8068 #endif
8069 #ifdef TARGET_NR_setgid32
8070 case TARGET_NR_setgid32:
8071 ret = get_errno(setgid(arg1));
8072 break;
8073 #endif
8074 #ifdef TARGET_NR_setfsuid32
8075 case TARGET_NR_setfsuid32:
8076 ret = get_errno(setfsuid(arg1));
8077 break;
8078 #endif
8079 #ifdef TARGET_NR_setfsgid32
8080 case TARGET_NR_setfsgid32:
8081 ret = get_errno(setfsgid(arg1));
8082 break;
8083 #endif
8084
8085 case TARGET_NR_pivot_root:
8086 goto unimplemented;
8087 #ifdef TARGET_NR_mincore
8088 case TARGET_NR_mincore:
8089 {
8090 void *a;
8091 ret = -TARGET_EFAULT;
8092 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8093 goto efault;
8094 if (!(p = lock_user_string(arg3)))
8095 goto mincore_fail;
8096 ret = get_errno(mincore(a, arg2, p));
8097 unlock_user(p, arg3, ret);
8098 mincore_fail:
8099 unlock_user(a, arg1, 0);
8100 }
8101 break;
8102 #endif
8103 #ifdef TARGET_NR_arm_fadvise64_64
8104 case TARGET_NR_arm_fadvise64_64:
8105 {
8106 /*
8107 * arm_fadvise64_64 looks like fadvise64_64 but
8108 * with different argument order
8109 */
8110 abi_long temp;
8111 temp = arg3;
8112 arg3 = arg4;
8113 arg4 = temp;
8114 }
8115 #endif
8116 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8117 #ifdef TARGET_NR_fadvise64_64
8118 case TARGET_NR_fadvise64_64:
8119 #endif
8120 #ifdef TARGET_NR_fadvise64
8121 case TARGET_NR_fadvise64:
8122 #endif
8123 #ifdef TARGET_S390X
8124 switch (arg4) {
8125 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8126 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8127 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8128 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8129 default: break;
8130 }
8131 #endif
8132 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8133 break;
8134 #endif
8135 #ifdef TARGET_NR_madvise
8136 case TARGET_NR_madvise:
8137 /* A straight passthrough may not be safe because qemu sometimes
8138 turns private flie-backed mappings into anonymous mappings.
8139 This will break MADV_DONTNEED.
8140 This is a hint, so ignoring and returning success is ok. */
8141 ret = get_errno(0);
8142 break;
8143 #endif
8144 #if TARGET_ABI_BITS == 32
8145 case TARGET_NR_fcntl64:
8146 {
8147 int cmd;
8148 struct flock64 fl;
8149 struct target_flock64 *target_fl;
8150 #ifdef TARGET_ARM
8151 struct target_eabi_flock64 *target_efl;
8152 #endif
8153
8154 cmd = target_to_host_fcntl_cmd(arg2);
8155 if (cmd == -TARGET_EINVAL) {
8156 ret = cmd;
8157 break;
8158 }
8159
8160 switch(arg2) {
8161 case TARGET_F_GETLK64:
8162 #ifdef TARGET_ARM
8163 if (((CPUARMState *)cpu_env)->eabi) {
8164 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8165 goto efault;
8166 fl.l_type = tswap16(target_efl->l_type);
8167 fl.l_whence = tswap16(target_efl->l_whence);
8168 fl.l_start = tswap64(target_efl->l_start);
8169 fl.l_len = tswap64(target_efl->l_len);
8170 fl.l_pid = tswap32(target_efl->l_pid);
8171 unlock_user_struct(target_efl, arg3, 0);
8172 } else
8173 #endif
8174 {
8175 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8176 goto efault;
8177 fl.l_type = tswap16(target_fl->l_type);
8178 fl.l_whence = tswap16(target_fl->l_whence);
8179 fl.l_start = tswap64(target_fl->l_start);
8180 fl.l_len = tswap64(target_fl->l_len);
8181 fl.l_pid = tswap32(target_fl->l_pid);
8182 unlock_user_struct(target_fl, arg3, 0);
8183 }
8184 ret = get_errno(fcntl(arg1, cmd, &fl));
8185 if (ret == 0) {
8186 #ifdef TARGET_ARM
8187 if (((CPUARMState *)cpu_env)->eabi) {
8188 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8189 goto efault;
8190 target_efl->l_type = tswap16(fl.l_type);
8191 target_efl->l_whence = tswap16(fl.l_whence);
8192 target_efl->l_start = tswap64(fl.l_start);
8193 target_efl->l_len = tswap64(fl.l_len);
8194 target_efl->l_pid = tswap32(fl.l_pid);
8195 unlock_user_struct(target_efl, arg3, 1);
8196 } else
8197 #endif
8198 {
8199 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8200 goto efault;
8201 target_fl->l_type = tswap16(fl.l_type);
8202 target_fl->l_whence = tswap16(fl.l_whence);
8203 target_fl->l_start = tswap64(fl.l_start);
8204 target_fl->l_len = tswap64(fl.l_len);
8205 target_fl->l_pid = tswap32(fl.l_pid);
8206 unlock_user_struct(target_fl, arg3, 1);
8207 }
8208 }
8209 break;
8210
8211 case TARGET_F_SETLK64:
8212 case TARGET_F_SETLKW64:
8213 #ifdef TARGET_ARM
8214 if (((CPUARMState *)cpu_env)->eabi) {
8215 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8216 goto efault;
8217 fl.l_type = tswap16(target_efl->l_type);
8218 fl.l_whence = tswap16(target_efl->l_whence);
8219 fl.l_start = tswap64(target_efl->l_start);
8220 fl.l_len = tswap64(target_efl->l_len);
8221 fl.l_pid = tswap32(target_efl->l_pid);
8222 unlock_user_struct(target_efl, arg3, 0);
8223 } else
8224 #endif
8225 {
8226 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8227 goto efault;
8228 fl.l_type = tswap16(target_fl->l_type);
8229 fl.l_whence = tswap16(target_fl->l_whence);
8230 fl.l_start = tswap64(target_fl->l_start);
8231 fl.l_len = tswap64(target_fl->l_len);
8232 fl.l_pid = tswap32(target_fl->l_pid);
8233 unlock_user_struct(target_fl, arg3, 0);
8234 }
8235 ret = get_errno(fcntl(arg1, cmd, &fl));
8236 break;
8237 default:
8238 ret = do_fcntl(arg1, arg2, arg3);
8239 break;
8240 }
8241 break;
8242 }
8243 #endif
8244 #ifdef TARGET_NR_cacheflush
8245 case TARGET_NR_cacheflush:
8246 /* self-modifying code is handled automatically, so nothing needed */
8247 ret = 0;
8248 break;
8249 #endif
8250 #ifdef TARGET_NR_security
8251 case TARGET_NR_security:
8252 goto unimplemented;
8253 #endif
8254 #ifdef TARGET_NR_getpagesize
8255 case TARGET_NR_getpagesize:
8256 ret = TARGET_PAGE_SIZE;
8257 break;
8258 #endif
8259 case TARGET_NR_gettid:
8260 ret = get_errno(gettid());
8261 break;
8262 #ifdef TARGET_NR_readahead
8263 case TARGET_NR_readahead:
8264 #if TARGET_ABI_BITS == 32
8265 if (regpairs_aligned(cpu_env)) {
8266 arg2 = arg3;
8267 arg3 = arg4;
8268 arg4 = arg5;
8269 }
8270 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8271 #else
8272 ret = get_errno(readahead(arg1, arg2, arg3));
8273 #endif
8274 break;
8275 #endif
8276 #ifdef CONFIG_ATTR
8277 #ifdef TARGET_NR_setxattr
8278 case TARGET_NR_listxattr:
8279 case TARGET_NR_llistxattr:
8280 {
8281 void *p, *b = 0;
8282 if (arg2) {
8283 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8284 if (!b) {
8285 ret = -TARGET_EFAULT;
8286 break;
8287 }
8288 }
8289 p = lock_user_string(arg1);
8290 if (p) {
8291 if (num == TARGET_NR_listxattr) {
8292 ret = get_errno(listxattr(p, b, arg3));
8293 } else {
8294 ret = get_errno(llistxattr(p, b, arg3));
8295 }
8296 } else {
8297 ret = -TARGET_EFAULT;
8298 }
8299 unlock_user(p, arg1, 0);
8300 unlock_user(b, arg2, arg3);
8301 break;
8302 }
8303 case TARGET_NR_flistxattr:
8304 {
8305 void *b = 0;
8306 if (arg2) {
8307 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8308 if (!b) {
8309 ret = -TARGET_EFAULT;
8310 break;
8311 }
8312 }
8313 ret = get_errno(flistxattr(arg1, b, arg3));
8314 unlock_user(b, arg2, arg3);
8315 break;
8316 }
8317 case TARGET_NR_setxattr:
8318 case TARGET_NR_lsetxattr:
8319 {
8320 void *p, *n, *v = 0;
8321 if (arg3) {
8322 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8323 if (!v) {
8324 ret = -TARGET_EFAULT;
8325 break;
8326 }
8327 }
8328 p = lock_user_string(arg1);
8329 n = lock_user_string(arg2);
8330 if (p && n) {
8331 if (num == TARGET_NR_setxattr) {
8332 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8333 } else {
8334 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8335 }
8336 } else {
8337 ret = -TARGET_EFAULT;
8338 }
8339 unlock_user(p, arg1, 0);
8340 unlock_user(n, arg2, 0);
8341 unlock_user(v, arg3, 0);
8342 }
8343 break;
8344 case TARGET_NR_fsetxattr:
8345 {
8346 void *n, *v = 0;
8347 if (arg3) {
8348 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8349 if (!v) {
8350 ret = -TARGET_EFAULT;
8351 break;
8352 }
8353 }
8354 n = lock_user_string(arg2);
8355 if (n) {
8356 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8357 } else {
8358 ret = -TARGET_EFAULT;
8359 }
8360 unlock_user(n, arg2, 0);
8361 unlock_user(v, arg3, 0);
8362 }
8363 break;
8364 case TARGET_NR_getxattr:
8365 case TARGET_NR_lgetxattr:
8366 {
8367 void *p, *n, *v = 0;
8368 if (arg3) {
8369 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8370 if (!v) {
8371 ret = -TARGET_EFAULT;
8372 break;
8373 }
8374 }
8375 p = lock_user_string(arg1);
8376 n = lock_user_string(arg2);
8377 if (p && n) {
8378 if (num == TARGET_NR_getxattr) {
8379 ret = get_errno(getxattr(p, n, v, arg4));
8380 } else {
8381 ret = get_errno(lgetxattr(p, n, v, arg4));
8382 }
8383 } else {
8384 ret = -TARGET_EFAULT;
8385 }
8386 unlock_user(p, arg1, 0);
8387 unlock_user(n, arg2, 0);
8388 unlock_user(v, arg3, arg4);
8389 }
8390 break;
8391 case TARGET_NR_fgetxattr:
8392 {
8393 void *n, *v = 0;
8394 if (arg3) {
8395 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8396 if (!v) {
8397 ret = -TARGET_EFAULT;
8398 break;
8399 }
8400 }
8401 n = lock_user_string(arg2);
8402 if (n) {
8403 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8404 } else {
8405 ret = -TARGET_EFAULT;
8406 }
8407 unlock_user(n, arg2, 0);
8408 unlock_user(v, arg3, arg4);
8409 }
8410 break;
8411 case TARGET_NR_removexattr:
8412 case TARGET_NR_lremovexattr:
8413 {
8414 void *p, *n;
8415 p = lock_user_string(arg1);
8416 n = lock_user_string(arg2);
8417 if (p && n) {
8418 if (num == TARGET_NR_removexattr) {
8419 ret = get_errno(removexattr(p, n));
8420 } else {
8421 ret = get_errno(lremovexattr(p, n));
8422 }
8423 } else {
8424 ret = -TARGET_EFAULT;
8425 }
8426 unlock_user(p, arg1, 0);
8427 unlock_user(n, arg2, 0);
8428 }
8429 break;
8430 case TARGET_NR_fremovexattr:
8431 {
8432 void *n;
8433 n = lock_user_string(arg2);
8434 if (n) {
8435 ret = get_errno(fremovexattr(arg1, n));
8436 } else {
8437 ret = -TARGET_EFAULT;
8438 }
8439 unlock_user(n, arg2, 0);
8440 }
8441 break;
8442 #endif
8443 #endif /* CONFIG_ATTR */
8444 #ifdef TARGET_NR_set_thread_area
8445 case TARGET_NR_set_thread_area:
8446 #if defined(TARGET_MIPS)
8447 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8448 ret = 0;
8449 break;
8450 #elif defined(TARGET_CRIS)
8451 if (arg1 & 0xff)
8452 ret = -TARGET_EINVAL;
8453 else {
8454 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8455 ret = 0;
8456 }
8457 break;
8458 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8459 ret = do_set_thread_area(cpu_env, arg1);
8460 break;
8461 #else
8462 goto unimplemented_nowarn;
8463 #endif
8464 #endif
8465 #ifdef TARGET_NR_get_thread_area
8466 case TARGET_NR_get_thread_area:
8467 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8468 ret = do_get_thread_area(cpu_env, arg1);
8469 #else
8470 goto unimplemented_nowarn;
8471 #endif
8472 #endif
8473 #ifdef TARGET_NR_getdomainname
8474 case TARGET_NR_getdomainname:
8475 goto unimplemented_nowarn;
8476 #endif
8477
8478 #ifdef TARGET_NR_clock_gettime
8479 case TARGET_NR_clock_gettime:
8480 {
8481 struct timespec ts;
8482 ret = get_errno(clock_gettime(arg1, &ts));
8483 if (!is_error(ret)) {
8484 host_to_target_timespec(arg2, &ts);
8485 }
8486 break;
8487 }
8488 #endif
8489 #ifdef TARGET_NR_clock_getres
8490 case TARGET_NR_clock_getres:
8491 {
8492 struct timespec ts;
8493 ret = get_errno(clock_getres(arg1, &ts));
8494 if (!is_error(ret)) {
8495 host_to_target_timespec(arg2, &ts);
8496 }
8497 break;
8498 }
8499 #endif
8500 #ifdef TARGET_NR_clock_nanosleep
8501 case TARGET_NR_clock_nanosleep:
8502 {
8503 struct timespec ts;
8504 target_to_host_timespec(&ts, arg3);
8505 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8506 if (arg4)
8507 host_to_target_timespec(arg4, &ts);
8508 break;
8509 }
8510 #endif
8511
8512 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8513 case TARGET_NR_set_tid_address:
8514 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8515 break;
8516 #endif
8517
8518 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8519 case TARGET_NR_tkill:
8520 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8521 break;
8522 #endif
8523
8524 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8525 case TARGET_NR_tgkill:
8526 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8527 target_to_host_signal(arg3)));
8528 break;
8529 #endif
8530
8531 #ifdef TARGET_NR_set_robust_list
8532 case TARGET_NR_set_robust_list:
8533 goto unimplemented_nowarn;
8534 #endif
8535
8536 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8537 case TARGET_NR_utimensat:
8538 {
8539 struct timespec *tsp, ts[2];
8540 if (!arg3) {
8541 tsp = NULL;
8542 } else {
8543 target_to_host_timespec(ts, arg3);
8544 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8545 tsp = ts;
8546 }
8547 if (!arg2)
8548 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8549 else {
8550 if (!(p = lock_user_string(arg2))) {
8551 ret = -TARGET_EFAULT;
8552 goto fail;
8553 }
8554 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8555 unlock_user(p, arg2, 0);
8556 }
8557 }
8558 break;
8559 #endif
8560 #if defined(CONFIG_USE_NPTL)
8561 case TARGET_NR_futex:
8562 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8563 break;
8564 #endif
8565 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8566 case TARGET_NR_inotify_init:
8567 ret = get_errno(sys_inotify_init());
8568 break;
8569 #endif
8570 #ifdef CONFIG_INOTIFY1
8571 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8572 case TARGET_NR_inotify_init1:
8573 ret = get_errno(sys_inotify_init1(arg1));
8574 break;
8575 #endif
8576 #endif
8577 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8578 case TARGET_NR_inotify_add_watch:
8579 p = lock_user_string(arg2);
8580 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8581 unlock_user(p, arg2, 0);
8582 break;
8583 #endif
8584 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8585 case TARGET_NR_inotify_rm_watch:
8586 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8587 break;
8588 #endif
8589
8590 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8591 case TARGET_NR_mq_open:
8592 {
8593 struct mq_attr posix_mq_attr;
8594
8595 p = lock_user_string(arg1 - 1);
8596 if (arg4 != 0)
8597 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8598 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
8599 unlock_user (p, arg1, 0);
8600 }
8601 break;
8602
8603 case TARGET_NR_mq_unlink:
8604 p = lock_user_string(arg1 - 1);
8605 ret = get_errno(mq_unlink(p));
8606 unlock_user (p, arg1, 0);
8607 break;
8608
8609 case TARGET_NR_mq_timedsend:
8610 {
8611 struct timespec ts;
8612
8613 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8614 if (arg5 != 0) {
8615 target_to_host_timespec(&ts, arg5);
8616 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8617 host_to_target_timespec(arg5, &ts);
8618 }
8619 else
8620 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8621 unlock_user (p, arg2, arg3);
8622 }
8623 break;
8624
8625 case TARGET_NR_mq_timedreceive:
8626 {
8627 struct timespec ts;
8628 unsigned int prio;
8629
8630 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8631 if (arg5 != 0) {
8632 target_to_host_timespec(&ts, arg5);
8633 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8634 host_to_target_timespec(arg5, &ts);
8635 }
8636 else
8637 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8638 unlock_user (p, arg2, arg3);
8639 if (arg4 != 0)
8640 put_user_u32(prio, arg4);
8641 }
8642 break;
8643
8644 /* Not implemented for now... */
8645 /* case TARGET_NR_mq_notify: */
8646 /* break; */
8647
8648 case TARGET_NR_mq_getsetattr:
8649 {
8650 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8651 ret = 0;
8652 if (arg3 != 0) {
8653 ret = mq_getattr(arg1, &posix_mq_attr_out);
8654 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8655 }
8656 if (arg2 != 0) {
8657 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8658 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8659 }
8660
8661 }
8662 break;
8663 #endif
8664
8665 #ifdef CONFIG_SPLICE
8666 #ifdef TARGET_NR_tee
8667 case TARGET_NR_tee:
8668 {
8669 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8670 }
8671 break;
8672 #endif
8673 #ifdef TARGET_NR_splice
8674 case TARGET_NR_splice:
8675 {
8676 loff_t loff_in, loff_out;
8677 loff_t *ploff_in = NULL, *ploff_out = NULL;
8678 if(arg2) {
8679 get_user_u64(loff_in, arg2);
8680 ploff_in = &loff_in;
8681 }
8682 if(arg4) {
8683 get_user_u64(loff_out, arg2);
8684 ploff_out = &loff_out;
8685 }
8686 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8687 }
8688 break;
8689 #endif
8690 #ifdef TARGET_NR_vmsplice
8691 case TARGET_NR_vmsplice:
8692 {
8693 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8694 if (vec != NULL) {
8695 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8696 unlock_iovec(vec, arg2, arg3, 0);
8697 } else {
8698 ret = -host_to_target_errno(errno);
8699 }
8700 }
8701 break;
8702 #endif
8703 #endif /* CONFIG_SPLICE */
8704 #ifdef CONFIG_EVENTFD
8705 #if defined(TARGET_NR_eventfd)
8706 case TARGET_NR_eventfd:
8707 ret = get_errno(eventfd(arg1, 0));
8708 break;
8709 #endif
8710 #if defined(TARGET_NR_eventfd2)
8711 case TARGET_NR_eventfd2:
8712 ret = get_errno(eventfd(arg1, arg2));
8713 break;
8714 #endif
8715 #endif /* CONFIG_EVENTFD */
8716 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8717 case TARGET_NR_fallocate:
8718 #if TARGET_ABI_BITS == 32
8719 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8720 target_offset64(arg5, arg6)));
8721 #else
8722 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8723 #endif
8724 break;
8725 #endif
8726 #if defined(CONFIG_SYNC_FILE_RANGE)
8727 #if defined(TARGET_NR_sync_file_range)
8728 case TARGET_NR_sync_file_range:
8729 #if TARGET_ABI_BITS == 32
8730 #if defined(TARGET_MIPS)
8731 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8732 target_offset64(arg5, arg6), arg7));
8733 #else
8734 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8735 target_offset64(arg4, arg5), arg6));
8736 #endif /* !TARGET_MIPS */
8737 #else
8738 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8739 #endif
8740 break;
8741 #endif
8742 #if defined(TARGET_NR_sync_file_range2)
8743 case TARGET_NR_sync_file_range2:
8744 /* This is like sync_file_range but the arguments are reordered */
8745 #if TARGET_ABI_BITS == 32
8746 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8747 target_offset64(arg5, arg6), arg2));
8748 #else
8749 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8750 #endif
8751 break;
8752 #endif
8753 #endif
8754 #if defined(CONFIG_EPOLL)
8755 #if defined(TARGET_NR_epoll_create)
8756 case TARGET_NR_epoll_create:
8757 ret = get_errno(epoll_create(arg1));
8758 break;
8759 #endif
8760 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8761 case TARGET_NR_epoll_create1:
8762 ret = get_errno(epoll_create1(arg1));
8763 break;
8764 #endif
8765 #if defined(TARGET_NR_epoll_ctl)
8766 case TARGET_NR_epoll_ctl:
8767 {
8768 struct epoll_event ep;
8769 struct epoll_event *epp = 0;
8770 if (arg4) {
8771 struct target_epoll_event *target_ep;
8772 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8773 goto efault;
8774 }
8775 ep.events = tswap32(target_ep->events);
8776 /* The epoll_data_t union is just opaque data to the kernel,
8777 * so we transfer all 64 bits across and need not worry what
8778 * actual data type it is.
8779 */
8780 ep.data.u64 = tswap64(target_ep->data.u64);
8781 unlock_user_struct(target_ep, arg4, 0);
8782 epp = &ep;
8783 }
8784 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8785 break;
8786 }
8787 #endif
8788
8789 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8790 #define IMPLEMENT_EPOLL_PWAIT
8791 #endif
8792 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8793 #if defined(TARGET_NR_epoll_wait)
8794 case TARGET_NR_epoll_wait:
8795 #endif
8796 #if defined(IMPLEMENT_EPOLL_PWAIT)
8797 case TARGET_NR_epoll_pwait:
8798 #endif
8799 {
8800 struct target_epoll_event *target_ep;
8801 struct epoll_event *ep;
8802 int epfd = arg1;
8803 int maxevents = arg3;
8804 int timeout = arg4;
8805
8806 target_ep = lock_user(VERIFY_WRITE, arg2,
8807 maxevents * sizeof(struct target_epoll_event), 1);
8808 if (!target_ep) {
8809 goto efault;
8810 }
8811
8812 ep = alloca(maxevents * sizeof(struct epoll_event));
8813
8814 switch (num) {
8815 #if defined(IMPLEMENT_EPOLL_PWAIT)
8816 case TARGET_NR_epoll_pwait:
8817 {
8818 target_sigset_t *target_set;
8819 sigset_t _set, *set = &_set;
8820
8821 if (arg5) {
8822 target_set = lock_user(VERIFY_READ, arg5,
8823 sizeof(target_sigset_t), 1);
8824 if (!target_set) {
8825 unlock_user(target_ep, arg2, 0);
8826 goto efault;
8827 }
8828 target_to_host_sigset(set, target_set);
8829 unlock_user(target_set, arg5, 0);
8830 } else {
8831 set = NULL;
8832 }
8833
8834 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8835 break;
8836 }
8837 #endif
8838 #if defined(TARGET_NR_epoll_wait)
8839 case TARGET_NR_epoll_wait:
8840 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8841 break;
8842 #endif
8843 default:
8844 ret = -TARGET_ENOSYS;
8845 }
8846 if (!is_error(ret)) {
8847 int i;
8848 for (i = 0; i < ret; i++) {
8849 target_ep[i].events = tswap32(ep[i].events);
8850 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8851 }
8852 }
8853 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8854 break;
8855 }
8856 #endif
8857 #endif
8858 #ifdef TARGET_NR_prlimit64
8859 case TARGET_NR_prlimit64:
8860 {
8861 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8862 struct target_rlimit64 *target_rnew, *target_rold;
8863 struct host_rlimit64 rnew, rold, *rnewp = 0;
8864 if (arg3) {
8865 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8866 goto efault;
8867 }
8868 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8869 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8870 unlock_user_struct(target_rnew, arg3, 0);
8871 rnewp = &rnew;
8872 }
8873
8874 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8875 if (!is_error(ret) && arg4) {
8876 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8877 goto efault;
8878 }
8879 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8880 target_rold->rlim_max = tswap64(rold.rlim_max);
8881 unlock_user_struct(target_rold, arg4, 1);
8882 }
8883 break;
8884 }
8885 #endif
8886 #ifdef TARGET_NR_gethostname
8887 case TARGET_NR_gethostname:
8888 {
8889 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8890 if (name) {
8891 ret = get_errno(gethostname(name, arg2));
8892 unlock_user(name, arg1, arg2);
8893 } else {
8894 ret = -TARGET_EFAULT;
8895 }
8896 break;
8897 }
8898 #endif
8899 default:
8900 unimplemented:
8901 gemu_log("qemu: Unsupported syscall: %d\n", num);
8902 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8903 unimplemented_nowarn:
8904 #endif
8905 ret = -TARGET_ENOSYS;
8906 break;
8907 }
8908 fail:
8909 #ifdef DEBUG
8910 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8911 #endif
8912 if(do_strace)
8913 print_syscall_ret(num, ret);
8914 return ret;
8915 efault:
8916 ret = -TARGET_EFAULT;
8917 goto fail;
8918 }