[qemu.git] / linux-user / syscall.c
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include <linux/icmp.h>
64 #include "qemu-common.h"
65 #ifdef TARGET_GPROF
66 #include <sys/gmon.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu-xattr.h"
76 #endif
77
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/utsname.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
91 #include <linux/kd.h>
92 #include <linux/mtio.h>
93 #include <linux/fs.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
96 #endif
97 #include <linux/fb.h>
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include "linux_loop.h"
101 #include "cpu-uname.h"
102
103 #include "qemu.h"
104
105 #if defined(CONFIG_USE_NPTL)
106 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
107 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
108 #else
109 /* XXX: Hardcode the above values. */
110 #define CLONE_NPTL_FLAGS2 0
111 #endif
112
113 //#define DEBUG
114
115 //#include <linux/msdos_fs.h>
116 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
117 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
118
119
120 #undef _syscall0
121 #undef _syscall1
122 #undef _syscall2
123 #undef _syscall3
124 #undef _syscall4
125 #undef _syscall5
126 #undef _syscall6
127
128 #define _syscall0(type,name) \
129 static type name (void) \
130 { \
131 return syscall(__NR_##name); \
132 }
133
134 #define _syscall1(type,name,type1,arg1) \
135 static type name (type1 arg1) \
136 { \
137 return syscall(__NR_##name, arg1); \
138 }
139
140 #define _syscall2(type,name,type1,arg1,type2,arg2) \
141 static type name (type1 arg1,type2 arg2) \
142 { \
143 return syscall(__NR_##name, arg1, arg2); \
144 }
145
146 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
147 static type name (type1 arg1,type2 arg2,type3 arg3) \
148 { \
149 return syscall(__NR_##name, arg1, arg2, arg3); \
150 }
151
152 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
153 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
154 { \
155 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
156 }
157
158 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
159 type5,arg5) \
160 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
161 { \
162 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
163 }
164
165
166 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
167 type5,arg5,type6,arg6) \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
169 type6 arg6) \
170 { \
171 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
172 }
173
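/*
 * A sketch of how these wrapper macros expand: a later line such as
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count)
 * becomes, roughly,
 *     static int sys_getdents (uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 * i.e. a thin static wrapper that funnels its arguments into the host's
 * variadic syscall(2), with the syscall number pasted together from the name.
 */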
174
175 #define __NR_sys_uname __NR_uname
176 #define __NR_sys_faccessat __NR_faccessat
177 #define __NR_sys_fchmodat __NR_fchmodat
178 #define __NR_sys_fchownat __NR_fchownat
179 #define __NR_sys_fstatat64 __NR_fstatat64
180 #define __NR_sys_futimesat __NR_futimesat
181 #define __NR_sys_getcwd1 __NR_getcwd
182 #define __NR_sys_getdents __NR_getdents
183 #define __NR_sys_getdents64 __NR_getdents64
184 #define __NR_sys_getpriority __NR_getpriority
185 #define __NR_sys_linkat __NR_linkat
186 #define __NR_sys_mkdirat __NR_mkdirat
187 #define __NR_sys_mknodat __NR_mknodat
188 #define __NR_sys_newfstatat __NR_newfstatat
189 #define __NR_sys_openat __NR_openat
190 #define __NR_sys_readlinkat __NR_readlinkat
191 #define __NR_sys_renameat __NR_renameat
192 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
193 #define __NR_sys_symlinkat __NR_symlinkat
194 #define __NR_sys_syslog __NR_syslog
195 #define __NR_sys_tgkill __NR_tgkill
196 #define __NR_sys_tkill __NR_tkill
197 #define __NR_sys_unlinkat __NR_unlinkat
198 #define __NR_sys_utimensat __NR_utimensat
199 #define __NR_sys_futex __NR_futex
200 #define __NR_sys_inotify_init __NR_inotify_init
201 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
202 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
203
204 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
205 defined(__s390x__)
206 #define __NR__llseek __NR_lseek
207 #endif
208
209 #ifdef __NR_gettid
210 _syscall0(int, gettid)
211 #else
212 /* This is a replacement for the host gettid() and must return a host
213 errno. */
214 static int gettid(void) {
215 return -ENOSYS;
216 }
217 #endif
218 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
219 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
220 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
221 #endif
222 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
223 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
224 loff_t *, res, uint, wh);
225 #endif
226 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
227 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
228 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
229 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
230 #endif
231 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
232 _syscall2(int,sys_tkill,int,tid,int,sig)
233 #endif
234 #ifdef __NR_exit_group
235 _syscall1(int,exit_group,int,error_code)
236 #endif
237 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
238 _syscall1(int,set_tid_address,int *,tidptr)
239 #endif
240 #if defined(CONFIG_USE_NPTL)
241 #if defined(TARGET_NR_futex) && defined(__NR_futex)
242 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
243 const struct timespec *,timeout,int *,uaddr2,int,val3)
244 #endif
245 #endif
246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
247 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
250 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
251 unsigned long *, user_mask_ptr);
252 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
253 void *, arg);
254
255 static bitmask_transtbl fcntl_flags_tbl[] = {
256 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
257 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
258 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
259 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
260 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
261 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
262 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
263 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
264 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
265 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
266 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
267 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
268 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
269 #if defined(O_DIRECT)
270 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
271 #endif
272 #if defined(O_NOATIME)
273 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
274 #endif
275 #if defined(O_CLOEXEC)
276 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
277 #endif
278 #if defined(O_PATH)
279 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
280 #endif
281 /* Don't terminate the list prematurely on 64-bit host+guest. */
282 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
283 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
284 #endif
285 { 0, 0, 0, 0 }
286 };
287
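/*
 * How this table is meant to be read (a sketch, assuming the generic
 * target_to_host_bitmask()/host_to_target_bitmask() helpers declared in
 * qemu.h): each row is { target_mask, target_bits, host_mask, host_bits },
 * so converting a guest open() flags word is roughly
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * which turns e.g. a guest TARGET_O_NONBLOCK|TARGET_O_CREAT into the host's
 * O_NONBLOCK|O_CREAT even when the numeric flag values differ between ABIs.
 */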
288 #define COPY_UTSNAME_FIELD(dest, src) \
289 do { \
290 /* __NEW_UTS_LEN doesn't include terminating null */ \
291 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
292 (dest)[__NEW_UTS_LEN] = '\0'; \
293 } while (0)
294
295 static int sys_uname(struct new_utsname *buf)
296 {
297 struct utsname uts_buf;
298
299 if (uname(&uts_buf) < 0)
300 return (-1);
301
302 /*
303 * Just in case these have some differences, we
304 * translate utsname to new_utsname (which is the
305 * struct the Linux kernel uses).
306 */
307
308 memset(buf, 0, sizeof(*buf));
309 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
310 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
311 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
312 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
313 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
314 #ifdef _GNU_SOURCE
315 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
316 #endif
317 return (0);
318
319 #undef COPY_UTSNAME_FIELD
320 }
321
322 static int sys_getcwd1(char *buf, size_t size)
323 {
324 if (getcwd(buf, size) == NULL) {
325 /* getcwd() sets errno */
326 return (-1);
327 }
328 return strlen(buf)+1;
329 }
330
331 #ifdef CONFIG_ATFILE
332 /*
333 * The host system seems to have the *at() syscall stubs available. We
334 * enable them one by one as required by the target's syscall_nr.h.
335 */
336
337 #ifdef TARGET_NR_faccessat
338 static int sys_faccessat(int dirfd, const char *pathname, int mode)
339 {
340 return (faccessat(dirfd, pathname, mode, 0));
341 }
342 #endif
343 #ifdef TARGET_NR_fchmodat
344 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
345 {
346 return (fchmodat(dirfd, pathname, mode, 0));
347 }
348 #endif
349 #if defined(TARGET_NR_fchownat)
350 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
351 gid_t group, int flags)
352 {
353 return (fchownat(dirfd, pathname, owner, group, flags));
354 }
355 #endif
356 #ifdef __NR_fstatat64
357 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
358 int flags)
359 {
360 return (fstatat(dirfd, pathname, buf, flags));
361 }
362 #endif
363 #ifdef __NR_newfstatat
364 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
365 int flags)
366 {
367 return (fstatat(dirfd, pathname, buf, flags));
368 }
369 #endif
370 #ifdef TARGET_NR_futimesat
371 static int sys_futimesat(int dirfd, const char *pathname,
372 const struct timeval times[2])
373 {
374 return (futimesat(dirfd, pathname, times));
375 }
376 #endif
377 #ifdef TARGET_NR_linkat
378 static int sys_linkat(int olddirfd, const char *oldpath,
379 int newdirfd, const char *newpath, int flags)
380 {
381 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
382 }
383 #endif
384 #ifdef TARGET_NR_mkdirat
385 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
386 {
387 return (mkdirat(dirfd, pathname, mode));
388 }
389 #endif
390 #ifdef TARGET_NR_mknodat
391 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
392 dev_t dev)
393 {
394 return (mknodat(dirfd, pathname, mode, dev));
395 }
396 #endif
397 #ifdef TARGET_NR_openat
398 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
399 {
400 /*
401 * open(2) takes an extra 'mode' parameter when it is called
402 * with the O_CREAT flag.
403 */
404 if ((flags & O_CREAT) != 0) {
405 return (openat(dirfd, pathname, flags, mode));
406 }
407 return (openat(dirfd, pathname, flags));
408 }
409 #endif
410 #ifdef TARGET_NR_readlinkat
411 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
412 {
413 return (readlinkat(dirfd, pathname, buf, bufsiz));
414 }
415 #endif
416 #ifdef TARGET_NR_renameat
417 static int sys_renameat(int olddirfd, const char *oldpath,
418 int newdirfd, const char *newpath)
419 {
420 return (renameat(olddirfd, oldpath, newdirfd, newpath));
421 }
422 #endif
423 #ifdef TARGET_NR_symlinkat
424 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
425 {
426 return (symlinkat(oldpath, newdirfd, newpath));
427 }
428 #endif
429 #ifdef TARGET_NR_unlinkat
430 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
431 {
432 return (unlinkat(dirfd, pathname, flags));
433 }
434 #endif
435 #else /* !CONFIG_ATFILE */
436
437 /*
438 * Try direct syscalls instead
439 */
440 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
441 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
442 #endif
443 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
444 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
445 #endif
446 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
447 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
448 uid_t,owner,gid_t,group,int,flags)
449 #endif
450 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
451 defined(__NR_fstatat64)
452 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
453 struct stat *,buf,int,flags)
454 #endif
455 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
456 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
457 const struct timeval *,times)
458 #endif
459 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
460 defined(__NR_newfstatat)
461 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
462 struct stat *,buf,int,flags)
463 #endif
464 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
465 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
466 int,newdirfd,const char *,newpath,int,flags)
467 #endif
468 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
469 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
470 #endif
471 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
472 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
473 mode_t,mode,dev_t,dev)
474 #endif
475 #if defined(TARGET_NR_openat) && defined(__NR_openat)
476 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
477 #endif
478 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
479 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
480 char *,buf,size_t,bufsize)
481 #endif
482 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
483 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
484 int,newdirfd,const char *,newpath)
485 #endif
486 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
487 _syscall3(int,sys_symlinkat,const char *,oldpath,
488 int,newdirfd,const char *,newpath)
489 #endif
490 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
491 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
492 #endif
493
494 #endif /* CONFIG_ATFILE */
495
496 #ifdef CONFIG_UTIMENSAT
497 static int sys_utimensat(int dirfd, const char *pathname,
498 const struct timespec times[2], int flags)
499 {
500 if (pathname == NULL)
501 return futimens(dirfd, times);
502 else
503 return utimensat(dirfd, pathname, times, flags);
504 }
505 #else
506 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
507 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
508 const struct timespec *,tsp,int,flags)
509 #endif
510 #endif /* CONFIG_UTIMENSAT */
511
512 #ifdef CONFIG_INOTIFY
513 #include <sys/inotify.h>
514
515 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
516 static int sys_inotify_init(void)
517 {
518 return (inotify_init());
519 }
520 #endif
521 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
522 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
523 {
524 return (inotify_add_watch(fd, pathname, mask));
525 }
526 #endif
527 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
528 static int sys_inotify_rm_watch(int fd, int32_t wd)
529 {
530 return (inotify_rm_watch(fd, wd));
531 }
532 #endif
533 #ifdef CONFIG_INOTIFY1
534 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
535 static int sys_inotify_init1(int flags)
536 {
537 return (inotify_init1(flags));
538 }
539 #endif
540 #endif
541 #else
542 /* Userspace can usually survive at runtime without inotify */
543 #undef TARGET_NR_inotify_init
544 #undef TARGET_NR_inotify_init1
545 #undef TARGET_NR_inotify_add_watch
546 #undef TARGET_NR_inotify_rm_watch
547 #endif /* CONFIG_INOTIFY */
548
549 #if defined(TARGET_NR_ppoll)
550 #ifndef __NR_ppoll
551 # define __NR_ppoll -1
552 #endif
553 #define __NR_sys_ppoll __NR_ppoll
554 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
555 struct timespec *, timeout, const __sigset_t *, sigmask,
556 size_t, sigsetsize)
557 #endif
558
559 #if defined(TARGET_NR_pselect6)
560 #ifndef __NR_pselect6
561 # define __NR_pselect6 -1
562 #endif
563 #define __NR_sys_pselect6 __NR_pselect6
564 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
565 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
566 #endif
567
568 #if defined(TARGET_NR_prlimit64)
569 #ifndef __NR_prlimit64
570 # define __NR_prlimit64 -1
571 #endif
572 #define __NR_sys_prlimit64 __NR_prlimit64
573 /* The glibc rlimit structure may not match the one used by the underlying syscall */
574 struct host_rlimit64 {
575 uint64_t rlim_cur;
576 uint64_t rlim_max;
577 };
578 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
579 const struct host_rlimit64 *, new_limit,
580 struct host_rlimit64 *, old_limit)
581 #endif
582
583 extern int personality(int);
584 extern int flock(int, int);
585 extern int setfsuid(int);
586 extern int setfsgid(int);
587 extern int setgroups(int, gid_t *);
588
589 /* ARM EABI and MIPS expect 64-bit types to be aligned on even register pairs */
590 #ifdef TARGET_ARM
591 static inline int regpairs_aligned(void *cpu_env) {
592 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
593 }
594 #elif defined(TARGET_MIPS)
595 static inline int regpairs_aligned(void *cpu_env) { return 1; }
596 #else
597 static inline int regpairs_aligned(void *cpu_env) { return 0; }
598 #endif
599
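/*
 * Illustrative sketch of what the alignment check above is for: when a
 * 64-bit value (say a file offset) is split across two 32-bit syscall
 * argument slots, an ABI that pairs it on even/odd registers inserts a
 * padding slot first. A syscall handler therefore does roughly
 *
 *     if (regpairs_aligned(cpu_env)) {
 *         arg4 = arg5;              (skip the padding slot)
 *         arg5 = arg6;
 *     }
 *     offset = ((uint64_t)hi << 32) | lo;
 *
 * before handing the reassembled 64-bit offset to the host syscall; which
 * half is hi and which is lo depends on the target's endianness and ABI.
 */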
600 #define ERRNO_TABLE_SIZE 1200
601
602 /* target_to_host_errno_table[] is initialized from
603 * host_to_target_errno_table[] in syscall_init(). */
604 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
605 };
606
607 /*
608 * This list is the union of errno values overridden in asm-<arch>/errno.h
609 * minus the errnos that are not actually generic to all archs.
610 */
611 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
612 [EIDRM] = TARGET_EIDRM,
613 [ECHRNG] = TARGET_ECHRNG,
614 [EL2NSYNC] = TARGET_EL2NSYNC,
615 [EL3HLT] = TARGET_EL3HLT,
616 [EL3RST] = TARGET_EL3RST,
617 [ELNRNG] = TARGET_ELNRNG,
618 [EUNATCH] = TARGET_EUNATCH,
619 [ENOCSI] = TARGET_ENOCSI,
620 [EL2HLT] = TARGET_EL2HLT,
621 [EDEADLK] = TARGET_EDEADLK,
622 [ENOLCK] = TARGET_ENOLCK,
623 [EBADE] = TARGET_EBADE,
624 [EBADR] = TARGET_EBADR,
625 [EXFULL] = TARGET_EXFULL,
626 [ENOANO] = TARGET_ENOANO,
627 [EBADRQC] = TARGET_EBADRQC,
628 [EBADSLT] = TARGET_EBADSLT,
629 [EBFONT] = TARGET_EBFONT,
630 [ENOSTR] = TARGET_ENOSTR,
631 [ENODATA] = TARGET_ENODATA,
632 [ETIME] = TARGET_ETIME,
633 [ENOSR] = TARGET_ENOSR,
634 [ENONET] = TARGET_ENONET,
635 [ENOPKG] = TARGET_ENOPKG,
636 [EREMOTE] = TARGET_EREMOTE,
637 [ENOLINK] = TARGET_ENOLINK,
638 [EADV] = TARGET_EADV,
639 [ESRMNT] = TARGET_ESRMNT,
640 [ECOMM] = TARGET_ECOMM,
641 [EPROTO] = TARGET_EPROTO,
642 [EDOTDOT] = TARGET_EDOTDOT,
643 [EMULTIHOP] = TARGET_EMULTIHOP,
644 [EBADMSG] = TARGET_EBADMSG,
645 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
646 [EOVERFLOW] = TARGET_EOVERFLOW,
647 [ENOTUNIQ] = TARGET_ENOTUNIQ,
648 [EBADFD] = TARGET_EBADFD,
649 [EREMCHG] = TARGET_EREMCHG,
650 [ELIBACC] = TARGET_ELIBACC,
651 [ELIBBAD] = TARGET_ELIBBAD,
652 [ELIBSCN] = TARGET_ELIBSCN,
653 [ELIBMAX] = TARGET_ELIBMAX,
654 [ELIBEXEC] = TARGET_ELIBEXEC,
655 [EILSEQ] = TARGET_EILSEQ,
656 [ENOSYS] = TARGET_ENOSYS,
657 [ELOOP] = TARGET_ELOOP,
658 [ERESTART] = TARGET_ERESTART,
659 [ESTRPIPE] = TARGET_ESTRPIPE,
660 [ENOTEMPTY] = TARGET_ENOTEMPTY,
661 [EUSERS] = TARGET_EUSERS,
662 [ENOTSOCK] = TARGET_ENOTSOCK,
663 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
664 [EMSGSIZE] = TARGET_EMSGSIZE,
665 [EPROTOTYPE] = TARGET_EPROTOTYPE,
666 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
667 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
668 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
669 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
670 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
671 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
672 [EADDRINUSE] = TARGET_EADDRINUSE,
673 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
674 [ENETDOWN] = TARGET_ENETDOWN,
675 [ENETUNREACH] = TARGET_ENETUNREACH,
676 [ENETRESET] = TARGET_ENETRESET,
677 [ECONNABORTED] = TARGET_ECONNABORTED,
678 [ECONNRESET] = TARGET_ECONNRESET,
679 [ENOBUFS] = TARGET_ENOBUFS,
680 [EISCONN] = TARGET_EISCONN,
681 [ENOTCONN] = TARGET_ENOTCONN,
682 [EUCLEAN] = TARGET_EUCLEAN,
683 [ENOTNAM] = TARGET_ENOTNAM,
684 [ENAVAIL] = TARGET_ENAVAIL,
685 [EISNAM] = TARGET_EISNAM,
686 [EREMOTEIO] = TARGET_EREMOTEIO,
687 [ESHUTDOWN] = TARGET_ESHUTDOWN,
688 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
689 [ETIMEDOUT] = TARGET_ETIMEDOUT,
690 [ECONNREFUSED] = TARGET_ECONNREFUSED,
691 [EHOSTDOWN] = TARGET_EHOSTDOWN,
692 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
693 [EALREADY] = TARGET_EALREADY,
694 [EINPROGRESS] = TARGET_EINPROGRESS,
695 [ESTALE] = TARGET_ESTALE,
696 [ECANCELED] = TARGET_ECANCELED,
697 [ENOMEDIUM] = TARGET_ENOMEDIUM,
698 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
699 #ifdef ENOKEY
700 [ENOKEY] = TARGET_ENOKEY,
701 #endif
702 #ifdef EKEYEXPIRED
703 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
704 #endif
705 #ifdef EKEYREVOKED
706 [EKEYREVOKED] = TARGET_EKEYREVOKED,
707 #endif
708 #ifdef EKEYREJECTED
709 [EKEYREJECTED] = TARGET_EKEYREJECTED,
710 #endif
711 #ifdef EOWNERDEAD
712 [EOWNERDEAD] = TARGET_EOWNERDEAD,
713 #endif
714 #ifdef ENOTRECOVERABLE
715 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
716 #endif
717 };
718
719 static inline int host_to_target_errno(int err)
720 {
721 if(host_to_target_errno_table[err])
722 return host_to_target_errno_table[err];
723 return err;
724 }
725
726 static inline int target_to_host_errno(int err)
727 {
728 if (target_to_host_errno_table[err])
729 return target_to_host_errno_table[err];
730 return err;
731 }
732
733 static inline abi_long get_errno(abi_long ret)
734 {
735 if (ret == -1)
736 return -host_to_target_errno(errno);
737 else
738 return ret;
739 }
740
741 static inline int is_error(abi_long ret)
742 {
743 return (abi_ulong)ret >= (abi_ulong)(-4096);
744 }
745
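/*
 * Usage sketch for the helpers above: host results are folded into a single
 * signed value, with failures encoded as a negative target errno, so a
 * typical wrapper is roughly
 *
 *     abi_long ret = get_errno(close(host_fd));
 *     if (is_error(ret))
 *         return ret;               (already -TARGET_Exxx for the guest)
 *
 * mirroring the kernel convention that values in [-4095, -1] are errnos.
 */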
746 char *target_strerror(int err)
747 {
748 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
749 return NULL;
750 }
751 return strerror(target_to_host_errno(err));
752 }
753
754 static abi_ulong target_brk;
755 static abi_ulong target_original_brk;
756 static abi_ulong brk_page;
757
758 void target_set_brk(abi_ulong new_brk)
759 {
760 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
761 brk_page = HOST_PAGE_ALIGN(target_brk);
762 }
763
764 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
765 #define DEBUGF_BRK(message, args...)
766
767 /* do_brk() must return target values and target errnos. */
768 abi_long do_brk(abi_ulong new_brk)
769 {
770 abi_long mapped_addr;
771 int new_alloc_size;
772
773 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
774
775 if (!new_brk) {
776 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
777 return target_brk;
778 }
779 if (new_brk < target_original_brk) {
780 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
781 target_brk);
782 return target_brk;
783 }
784
785 /* If the new brk is less than the highest page reserved to the
786 * target heap allocation, set it and we're almost done... */
787 if (new_brk <= brk_page) {
788 /* Heap contents are initialized to zero, as for anonymous
789 * mapped pages. */
790 if (new_brk > target_brk) {
791 memset(g2h(target_brk), 0, new_brk - target_brk);
792 }
793 target_brk = new_brk;
794 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
795 return target_brk;
796 }
797
798 /* We need to allocate more memory after the brk... Note that
799 * we don't use MAP_FIXED because that will map over the top of
800 * any existing mapping (like the one with the host libc or qemu
801 * itself); instead we treat "mapped but at wrong address" as
802 * a failure and unmap again.
803 */
804 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
805 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
806 PROT_READ|PROT_WRITE,
807 MAP_ANON|MAP_PRIVATE, 0, 0));
808
809 if (mapped_addr == brk_page) {
810 /* Heap contents are initialized to zero, as for anonymous
811 * mapped pages. Technically the new pages are already
812 * initialized to zero since they *are* anonymous mapped
813 * pages, however we have to take care with the contents that
814 * come from the remaining part of the previous page: it may
815 * contain garbage data from previous heap usage (grown
816 * then shrunk). */
817 memset(g2h(target_brk), 0, brk_page - target_brk);
818
819 target_brk = new_brk;
820 brk_page = HOST_PAGE_ALIGN(target_brk);
821 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
822 target_brk);
823 return target_brk;
824 } else if (mapped_addr != -1) {
825 /* Mapped but at wrong address, meaning there wasn't actually
826 * enough space for this brk.
827 */
828 target_munmap(mapped_addr, new_alloc_size);
829 mapped_addr = -1;
830 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
831 }
832 else {
833 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
834 }
835
836 #if defined(TARGET_ALPHA)
837 /* We (partially) emulate OSF/1 on Alpha, which requires we
838 return a proper errno, not an unchanged brk value. */
839 return -TARGET_ENOMEM;
840 #endif
841 /* For everything else, return the previous break. */
842 return target_brk;
843 }
844
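/*
 * Worked example of the bookkeeping above (addresses are made up): with
 * target_brk = 0x10000800 and brk_page = 0x10001000, a request for
 * new_brk = 0x10000c00 just zeroes [0x10000800, 0x10000c00) and moves
 * target_brk, while a request for new_brk = 0x10003000 tries
 *
 *     target_mmap(0x10001000, HOST_PAGE_ALIGN(0x10003000 - 0x10001000),
 *                 PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, 0, 0);
 *
 * and only succeeds if the kernel places the mapping exactly at brk_page;
 * otherwise the mapping is undone and the old break (or -TARGET_ENOMEM on
 * Alpha) is returned.
 */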
845 static inline abi_long copy_from_user_fdset(fd_set *fds,
846 abi_ulong target_fds_addr,
847 int n)
848 {
849 int i, nw, j, k;
850 abi_ulong b, *target_fds;
851
852 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
853 if (!(target_fds = lock_user(VERIFY_READ,
854 target_fds_addr,
855 sizeof(abi_ulong) * nw,
856 1)))
857 return -TARGET_EFAULT;
858
859 FD_ZERO(fds);
860 k = 0;
861 for (i = 0; i < nw; i++) {
862 /* grab the abi_ulong */
863 __get_user(b, &target_fds[i]);
864 for (j = 0; j < TARGET_ABI_BITS; j++) {
865 /* check the bit inside the abi_ulong */
866 if ((b >> j) & 1)
867 FD_SET(k, fds);
868 k++;
869 }
870 }
871
872 unlock_user(target_fds, target_fds_addr, 0);
873
874 return 0;
875 }
876
877 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
878 abi_ulong target_fds_addr,
879 int n)
880 {
881 if (target_fds_addr) {
882 if (copy_from_user_fdset(fds, target_fds_addr, n))
883 return -TARGET_EFAULT;
884 *fds_ptr = fds;
885 } else {
886 *fds_ptr = NULL;
887 }
888 return 0;
889 }
890
891 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
892 const fd_set *fds,
893 int n)
894 {
895 int i, nw, j, k;
896 abi_long v;
897 abi_ulong *target_fds;
898
899 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
900 if (!(target_fds = lock_user(VERIFY_WRITE,
901 target_fds_addr,
902 sizeof(abi_ulong) * nw,
903 0)))
904 return -TARGET_EFAULT;
905
906 k = 0;
907 for (i = 0; i < nw; i++) {
908 v = 0;
909 for (j = 0; j < TARGET_ABI_BITS; j++) {
910 v |= ((FD_ISSET(k, fds) != 0) << j);
911 k++;
912 }
913 __put_user(v, &target_fds[i]);
914 }
915
916 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
917
918 return 0;
919 }
920
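/*
 * The packing rule shared by both fdset helpers above, as a small worked
 * example: guest descriptor k lives in abi_ulong word k / TARGET_ABI_BITS
 * at bit k % TARGET_ABI_BITS. So for a 32-bit guest (TARGET_ABI_BITS == 32),
 * fd 37 is bit 5 of target_fds[1]:
 *
 *     __get_user(b, &target_fds[37 / 32]);
 *     if ((b >> (37 % 32)) & 1)
 *         FD_SET(37, fds);
 */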
921 #if defined(__alpha__)
922 #define HOST_HZ 1024
923 #else
924 #define HOST_HZ 100
925 #endif
926
927 static inline abi_long host_to_target_clock_t(long ticks)
928 {
929 #if HOST_HZ == TARGET_HZ
930 return ticks;
931 #else
932 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
933 #endif
934 }
935
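/*
 * For example, with HOST_HZ == 100 and a target that uses 1024 ticks per
 * second (the Alpha value of TARGET_HZ), 250 host ticks become
 * (250 * 1024) / 100 = 2560 target ticks; the int64_t cast keeps the
 * intermediate product from overflowing a 32-bit long.
 */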
936 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
937 const struct rusage *rusage)
938 {
939 struct target_rusage *target_rusage;
940
941 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
942 return -TARGET_EFAULT;
943 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
944 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
945 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
946 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
947 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
948 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
949 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
950 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
951 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
952 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
953 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
954 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
955 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
956 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
957 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
958 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
959 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
960 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
961 unlock_user_struct(target_rusage, target_addr, 1);
962
963 return 0;
964 }
965
966 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
967 {
968 abi_ulong target_rlim_swap;
969 rlim_t result;
970
971 target_rlim_swap = tswapal(target_rlim);
972 if (target_rlim_swap == TARGET_RLIM_INFINITY)
973 return RLIM_INFINITY;
974
975 result = target_rlim_swap;
976 if (target_rlim_swap != (rlim_t)result)
977 return RLIM_INFINITY;
978
979 return result;
980 }
981
982 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
983 {
984 abi_ulong target_rlim_swap;
985 abi_ulong result;
986
987 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
988 target_rlim_swap = TARGET_RLIM_INFINITY;
989 else
990 target_rlim_swap = rlim;
991 result = tswapal(target_rlim_swap);
992
993 return result;
994 }
995
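/*
 * Sketch of the round trip above for a 32-bit guest on a 64-bit host: a
 * guest TARGET_RLIM_INFINITY maps to the host's 64-bit RLIM_INFINITY, and
 * any host limit too large for an abi_ulong is clamped back to
 * TARGET_RLIM_INFINITY instead of being silently truncated:
 *
 *     struct rlimit rl;
 *     getrlimit(RLIMIT_NOFILE, &rl);
 *     abi_ulong guest_cur = host_to_target_rlim(rl.rlim_cur);
 */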
996 static inline int target_to_host_resource(int code)
997 {
998 switch (code) {
999 case TARGET_RLIMIT_AS:
1000 return RLIMIT_AS;
1001 case TARGET_RLIMIT_CORE:
1002 return RLIMIT_CORE;
1003 case TARGET_RLIMIT_CPU:
1004 return RLIMIT_CPU;
1005 case TARGET_RLIMIT_DATA:
1006 return RLIMIT_DATA;
1007 case TARGET_RLIMIT_FSIZE:
1008 return RLIMIT_FSIZE;
1009 case TARGET_RLIMIT_LOCKS:
1010 return RLIMIT_LOCKS;
1011 case TARGET_RLIMIT_MEMLOCK:
1012 return RLIMIT_MEMLOCK;
1013 case TARGET_RLIMIT_MSGQUEUE:
1014 return RLIMIT_MSGQUEUE;
1015 case TARGET_RLIMIT_NICE:
1016 return RLIMIT_NICE;
1017 case TARGET_RLIMIT_NOFILE:
1018 return RLIMIT_NOFILE;
1019 case TARGET_RLIMIT_NPROC:
1020 return RLIMIT_NPROC;
1021 case TARGET_RLIMIT_RSS:
1022 return RLIMIT_RSS;
1023 case TARGET_RLIMIT_RTPRIO:
1024 return RLIMIT_RTPRIO;
1025 case TARGET_RLIMIT_SIGPENDING:
1026 return RLIMIT_SIGPENDING;
1027 case TARGET_RLIMIT_STACK:
1028 return RLIMIT_STACK;
1029 default:
1030 return code;
1031 }
1032 }
1033
1034 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1035 abi_ulong target_tv_addr)
1036 {
1037 struct target_timeval *target_tv;
1038
1039 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1040 return -TARGET_EFAULT;
1041
1042 __get_user(tv->tv_sec, &target_tv->tv_sec);
1043 __get_user(tv->tv_usec, &target_tv->tv_usec);
1044
1045 unlock_user_struct(target_tv, target_tv_addr, 0);
1046
1047 return 0;
1048 }
1049
1050 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1051 const struct timeval *tv)
1052 {
1053 struct target_timeval *target_tv;
1054
1055 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1056 return -TARGET_EFAULT;
1057
1058 __put_user(tv->tv_sec, &target_tv->tv_sec);
1059 __put_user(tv->tv_usec, &target_tv->tv_usec);
1060
1061 unlock_user_struct(target_tv, target_tv_addr, 1);
1062
1063 return 0;
1064 }
1065
1066 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1067 #include <mqueue.h>
1068
1069 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1070 abi_ulong target_mq_attr_addr)
1071 {
1072 struct target_mq_attr *target_mq_attr;
1073
1074 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1075 target_mq_attr_addr, 1))
1076 return -TARGET_EFAULT;
1077
1078 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1079 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1080 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1081 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1082
1083 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1084
1085 return 0;
1086 }
1087
1088 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1089 const struct mq_attr *attr)
1090 {
1091 struct target_mq_attr *target_mq_attr;
1092
1093 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1094 target_mq_attr_addr, 0))
1095 return -TARGET_EFAULT;
1096
1097 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1098 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1099 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1100 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1101
1102 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1103
1104 return 0;
1105 }
1106 #endif
1107
1108 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1109 /* do_select() must return target values and target errnos. */
1110 static abi_long do_select(int n,
1111 abi_ulong rfd_addr, abi_ulong wfd_addr,
1112 abi_ulong efd_addr, abi_ulong target_tv_addr)
1113 {
1114 fd_set rfds, wfds, efds;
1115 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1116 struct timeval tv, *tv_ptr;
1117 abi_long ret;
1118
1119 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1120 if (ret) {
1121 return ret;
1122 }
1123 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1124 if (ret) {
1125 return ret;
1126 }
1127 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1128 if (ret) {
1129 return ret;
1130 }
1131
1132 if (target_tv_addr) {
1133 if (copy_from_user_timeval(&tv, target_tv_addr))
1134 return -TARGET_EFAULT;
1135 tv_ptr = &tv;
1136 } else {
1137 tv_ptr = NULL;
1138 }
1139
1140 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1141
1142 if (!is_error(ret)) {
1143 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1144 return -TARGET_EFAULT;
1145 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1146 return -TARGET_EFAULT;
1147 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1148 return -TARGET_EFAULT;
1149
1150 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1151 return -TARGET_EFAULT;
1152 }
1153
1154 return ret;
1155 }
1156 #endif
1157
1158 static abi_long do_pipe2(int host_pipe[], int flags)
1159 {
1160 #ifdef CONFIG_PIPE2
1161 return pipe2(host_pipe, flags);
1162 #else
1163 return -ENOSYS;
1164 #endif
1165 }
1166
1167 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1168 int flags, int is_pipe2)
1169 {
1170 int host_pipe[2];
1171 abi_long ret;
1172 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1173
1174 if (is_error(ret))
1175 return get_errno(ret);
1176
1177 /* Several targets have special calling conventions for the original
1178 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1179 if (!is_pipe2) {
1180 #if defined(TARGET_ALPHA)
1181 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1182 return host_pipe[0];
1183 #elif defined(TARGET_MIPS)
1184 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1185 return host_pipe[0];
1186 #elif defined(TARGET_SH4)
1187 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1188 return host_pipe[0];
1189 #endif
1190 }
1191
1192 if (put_user_s32(host_pipe[0], pipedes)
1193 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1194 return -TARGET_EFAULT;
1195 return get_errno(ret);
1196 }
1197
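/*
 * Illustration of the special convention handled above: on those targets
 * the classic pipe() returns both descriptors in registers rather than
 * writing through the user pointer, e.g. a MIPS guest effectively sees
 *
 *     fd[0] = v0   (the syscall return value)
 *     fd[1] = v1   (gpr[3], set explicitly here)
 *
 * while pipe2() on the same targets uses the ordinary memory-based ABI.
 */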
1198 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1199 abi_ulong target_addr,
1200 socklen_t len)
1201 {
1202 struct target_ip_mreqn *target_smreqn;
1203
1204 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1205 if (!target_smreqn)
1206 return -TARGET_EFAULT;
1207 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1208 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1209 if (len == sizeof(struct target_ip_mreqn))
1210 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1211 unlock_user(target_smreqn, target_addr, 0);
1212
1213 return 0;
1214 }
1215
1216 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1217 abi_ulong target_addr,
1218 socklen_t len)
1219 {
1220 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1221 sa_family_t sa_family;
1222 struct target_sockaddr *target_saddr;
1223
1224 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1225 if (!target_saddr)
1226 return -TARGET_EFAULT;
1227
1228 sa_family = tswap16(target_saddr->sa_family);
1229
1230 /* Oops. The caller might send an incomplete sun_path; sun_path
1231 * must be terminated by \0 (see the manual page), but
1232 * unfortunately it is quite common to specify sockaddr_un
1233 * length as "strlen(x->sun_path)" while it should be
1234 * "strlen(...) + 1". We'll fix that here if needed.
1235 * The Linux kernel has a similar workaround.
1236 */
1237
1238 if (sa_family == AF_UNIX) {
1239 if (len < unix_maxlen && len > 0) {
1240 char *cp = (char*)target_saddr;
1241
1242 if ( cp[len-1] && !cp[len] )
1243 len++;
1244 }
1245 if (len > unix_maxlen)
1246 len = unix_maxlen;
1247 }
1248
1249 memcpy(addr, target_saddr, len);
1250 addr->sa_family = sa_family;
1251 unlock_user(target_saddr, target_addr, 0);
1252
1253 return 0;
1254 }
1255
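/*
 * Example of the sun_path fix-up above (the path is hypothetical): a guest
 * that binds "/tmp/sock" but passes
 *
 *     addrlen = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock")
 *
 * has left the terminating '\0' outside the reported length; because the
 * byte just past the name is still zero, len is bumped by one so the host
 * kernel sees a properly terminated path.
 */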
1256 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1257 struct sockaddr *addr,
1258 socklen_t len)
1259 {
1260 struct target_sockaddr *target_saddr;
1261
1262 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1263 if (!target_saddr)
1264 return -TARGET_EFAULT;
1265 memcpy(target_saddr, addr, len);
1266 target_saddr->sa_family = tswap16(addr->sa_family);
1267 unlock_user(target_saddr, target_addr, len);
1268
1269 return 0;
1270 }
1271
1272 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1273 struct target_msghdr *target_msgh)
1274 {
1275 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1276 abi_long msg_controllen;
1277 abi_ulong target_cmsg_addr;
1278 struct target_cmsghdr *target_cmsg;
1279 socklen_t space = 0;
1280
1281 msg_controllen = tswapal(target_msgh->msg_controllen);
1282 if (msg_controllen < sizeof (struct target_cmsghdr))
1283 goto the_end;
1284 target_cmsg_addr = tswapal(target_msgh->msg_control);
1285 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1286 if (!target_cmsg)
1287 return -TARGET_EFAULT;
1288
1289 while (cmsg && target_cmsg) {
1290 void *data = CMSG_DATA(cmsg);
1291 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1292
1293 int len = tswapal(target_cmsg->cmsg_len)
1294 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1295
1296 space += CMSG_SPACE(len);
1297 if (space > msgh->msg_controllen) {
1298 space -= CMSG_SPACE(len);
1299 gemu_log("Host cmsg overflow\n");
1300 break;
1301 }
1302
1303 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1304 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1305 cmsg->cmsg_len = CMSG_LEN(len);
1306
1307 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1308 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1309 memcpy(data, target_data, len);
1310 } else {
1311 int *fd = (int *)data;
1312 int *target_fd = (int *)target_data;
1313 int i, numfds = len / sizeof(int);
1314
1315 for (i = 0; i < numfds; i++)
1316 fd[i] = tswap32(target_fd[i]);
1317 }
1318
1319 cmsg = CMSG_NXTHDR(msgh, cmsg);
1320 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1321 }
1322 unlock_user(target_cmsg, target_cmsg_addr, 0);
1323 the_end:
1324 msgh->msg_controllen = space;
1325 return 0;
1326 }
1327
1328 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1329 struct msghdr *msgh)
1330 {
1331 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1332 abi_long msg_controllen;
1333 abi_ulong target_cmsg_addr;
1334 struct target_cmsghdr *target_cmsg;
1335 socklen_t space = 0;
1336
1337 msg_controllen = tswapal(target_msgh->msg_controllen);
1338 if (msg_controllen < sizeof (struct target_cmsghdr))
1339 goto the_end;
1340 target_cmsg_addr = tswapal(target_msgh->msg_control);
1341 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1342 if (!target_cmsg)
1343 return -TARGET_EFAULT;
1344
1345 while (cmsg && target_cmsg) {
1346 void *data = CMSG_DATA(cmsg);
1347 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1348
1349 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1350
1351 space += TARGET_CMSG_SPACE(len);
1352 if (space > msg_controllen) {
1353 space -= TARGET_CMSG_SPACE(len);
1354 gemu_log("Target cmsg overflow\n");
1355 break;
1356 }
1357
1358 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1359 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1360 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1361
1362 if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1363 (cmsg->cmsg_type == SCM_RIGHTS)) {
1364 int *fd = (int *)data;
1365 int *target_fd = (int *)target_data;
1366 int i, numfds = len / sizeof(int);
1367
1368 for (i = 0; i < numfds; i++)
1369 target_fd[i] = tswap32(fd[i]);
1370 } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1371 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1372 (len == sizeof(struct timeval))) {
1373 /* copy struct timeval to target */
1374 struct timeval *tv = (struct timeval *)data;
1375 struct target_timeval *target_tv =
1376 (struct target_timeval *)target_data;
1377
1378 target_tv->tv_sec = tswapal(tv->tv_sec);
1379 target_tv->tv_usec = tswapal(tv->tv_usec);
1380 } else {
1381 gemu_log("Unsupported ancillary data: %d/%d\n",
1382 cmsg->cmsg_level, cmsg->cmsg_type);
1383 memcpy(target_data, data, len);
1384 }
1385
1386 cmsg = CMSG_NXTHDR(msgh, cmsg);
1387 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1388 }
1389 unlock_user(target_cmsg, target_cmsg_addr, space);
1390 the_end:
1391 target_msgh->msg_controllen = tswapal(space);
1392 return 0;
1393 }
1394
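/*
 * Layout sketch for the SCM_RIGHTS case the two converters above handle in
 * full (receive direction shown):
 *
 *     host:   cmsghdr{ cmsg_len = CMSG_LEN(2 * sizeof(int)), SOL_SOCKET,
 *                      SCM_RIGHTS } followed by [fd0][fd1]
 *     target: cmsghdr{ cmsg_len = TARGET_CMSG_LEN(2 * sizeof(int)), ... }
 *                      followed by [tswap32(fd0)][tswap32(fd1)]
 *
 * Only the header size/alignment and the byte order of each fd change;
 * SO_TIMESTAMP timevals are converted field by field on the receive path,
 * and any other ancillary type is passed through as raw bytes with a log
 * message.
 */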
1395 /* do_setsockopt() Must return target values and target errnos. */
1396 static abi_long do_setsockopt(int sockfd, int level, int optname,
1397 abi_ulong optval_addr, socklen_t optlen)
1398 {
1399 abi_long ret;
1400 int val;
1401 struct ip_mreqn *ip_mreq;
1402 struct ip_mreq_source *ip_mreq_source;
1403
1404 switch(level) {
1405 case SOL_TCP:
1406 /* TCP options all take an 'int' value. */
1407 if (optlen < sizeof(uint32_t))
1408 return -TARGET_EINVAL;
1409
1410 if (get_user_u32(val, optval_addr))
1411 return -TARGET_EFAULT;
1412 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1413 break;
1414 case SOL_IP:
1415 switch(optname) {
1416 case IP_TOS:
1417 case IP_TTL:
1418 case IP_HDRINCL:
1419 case IP_ROUTER_ALERT:
1420 case IP_RECVOPTS:
1421 case IP_RETOPTS:
1422 case IP_PKTINFO:
1423 case IP_MTU_DISCOVER:
1424 case IP_RECVERR:
1425 case IP_RECVTOS:
1426 #ifdef IP_FREEBIND
1427 case IP_FREEBIND:
1428 #endif
1429 case IP_MULTICAST_TTL:
1430 case IP_MULTICAST_LOOP:
1431 val = 0;
1432 if (optlen >= sizeof(uint32_t)) {
1433 if (get_user_u32(val, optval_addr))
1434 return -TARGET_EFAULT;
1435 } else if (optlen >= 1) {
1436 if (get_user_u8(val, optval_addr))
1437 return -TARGET_EFAULT;
1438 }
1439 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1440 break;
1441 case IP_ADD_MEMBERSHIP:
1442 case IP_DROP_MEMBERSHIP:
1443 if (optlen < sizeof (struct target_ip_mreq) ||
1444 optlen > sizeof (struct target_ip_mreqn))
1445 return -TARGET_EINVAL;
1446
1447 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1448 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1449 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1450 break;
1451
1452 case IP_BLOCK_SOURCE:
1453 case IP_UNBLOCK_SOURCE:
1454 case IP_ADD_SOURCE_MEMBERSHIP:
1455 case IP_DROP_SOURCE_MEMBERSHIP:
1456 if (optlen != sizeof (struct target_ip_mreq_source))
1457 return -TARGET_EINVAL;
1458
1459 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1460 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1461 unlock_user (ip_mreq_source, optval_addr, 0);
1462 break;
1463
1464 default:
1465 goto unimplemented;
1466 }
1467 break;
1468 case SOL_RAW:
1469 switch (optname) {
1470 case ICMP_FILTER:
1471 /* struct icmp_filter takes a u32 value */
1472 if (optlen < sizeof(uint32_t)) {
1473 return -TARGET_EINVAL;
1474 }
1475
1476 if (get_user_u32(val, optval_addr)) {
1477 return -TARGET_EFAULT;
1478 }
1479 ret = get_errno(setsockopt(sockfd, level, optname,
1480 &val, sizeof(val)));
1481 break;
1482
1483 default:
1484 goto unimplemented;
1485 }
1486 break;
1487 case TARGET_SOL_SOCKET:
1488 switch (optname) {
1489 /* Options with 'int' argument. */
1490 case TARGET_SO_DEBUG:
1491 optname = SO_DEBUG;
1492 break;
1493 case TARGET_SO_REUSEADDR:
1494 optname = SO_REUSEADDR;
1495 break;
1496 case TARGET_SO_TYPE:
1497 optname = SO_TYPE;
1498 break;
1499 case TARGET_SO_ERROR:
1500 optname = SO_ERROR;
1501 break;
1502 case TARGET_SO_DONTROUTE:
1503 optname = SO_DONTROUTE;
1504 break;
1505 case TARGET_SO_BROADCAST:
1506 optname = SO_BROADCAST;
1507 break;
1508 case TARGET_SO_SNDBUF:
1509 optname = SO_SNDBUF;
1510 break;
1511 case TARGET_SO_RCVBUF:
1512 optname = SO_RCVBUF;
1513 break;
1514 case TARGET_SO_KEEPALIVE:
1515 optname = SO_KEEPALIVE;
1516 break;
1517 case TARGET_SO_OOBINLINE:
1518 optname = SO_OOBINLINE;
1519 break;
1520 case TARGET_SO_NO_CHECK:
1521 optname = SO_NO_CHECK;
1522 break;
1523 case TARGET_SO_PRIORITY:
1524 optname = SO_PRIORITY;
1525 break;
1526 #ifdef SO_BSDCOMPAT
1527 case TARGET_SO_BSDCOMPAT:
1528 optname = SO_BSDCOMPAT;
1529 break;
1530 #endif
1531 case TARGET_SO_PASSCRED:
1532 optname = SO_PASSCRED;
1533 break;
1534 case TARGET_SO_TIMESTAMP:
1535 optname = SO_TIMESTAMP;
1536 break;
1537 case TARGET_SO_RCVLOWAT:
1538 optname = SO_RCVLOWAT;
1539 break;
1540 case TARGET_SO_RCVTIMEO:
1541 optname = SO_RCVTIMEO;
1542 break;
1543 case TARGET_SO_SNDTIMEO:
1544 optname = SO_SNDTIMEO;
1545 break;
1547 default:
1548 goto unimplemented;
1549 }
1550 if (optlen < sizeof(uint32_t))
1551 return -TARGET_EINVAL;
1552
1553 if (get_user_u32(val, optval_addr))
1554 return -TARGET_EFAULT;
1555 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1556 break;
1557 default:
1558 unimplemented:
1559 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1560 ret = -TARGET_ENOPROTOOPT;
1561 }
1562 return ret;
1563 }
1564
1565 /* do_getsockopt() Must return target values and target errnos. */
1566 static abi_long do_getsockopt(int sockfd, int level, int optname,
1567 abi_ulong optval_addr, abi_ulong optlen)
1568 {
1569 abi_long ret;
1570 int len, val;
1571 socklen_t lv;
1572
1573 switch(level) {
1574 case TARGET_SOL_SOCKET:
1575 level = SOL_SOCKET;
1576 switch (optname) {
1577 /* These don't just return a single integer */
1578 case TARGET_SO_LINGER:
1579 case TARGET_SO_RCVTIMEO:
1580 case TARGET_SO_SNDTIMEO:
1581 case TARGET_SO_PEERNAME:
1582 goto unimplemented;
1583 case TARGET_SO_PEERCRED: {
1584 struct ucred cr;
1585 socklen_t crlen;
1586 struct target_ucred *tcr;
1587
1588 if (get_user_u32(len, optlen)) {
1589 return -TARGET_EFAULT;
1590 }
1591 if (len < 0) {
1592 return -TARGET_EINVAL;
1593 }
1594
1595 crlen = sizeof(cr);
1596 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1597 &cr, &crlen));
1598 if (ret < 0) {
1599 return ret;
1600 }
1601 if (len > crlen) {
1602 len = crlen;
1603 }
1604 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1605 return -TARGET_EFAULT;
1606 }
1607 __put_user(cr.pid, &tcr->pid);
1608 __put_user(cr.uid, &tcr->uid);
1609 __put_user(cr.gid, &tcr->gid);
1610 unlock_user_struct(tcr, optval_addr, 1);
1611 if (put_user_u32(len, optlen)) {
1612 return -TARGET_EFAULT;
1613 }
1614 break;
1615 }
1616 /* Options with 'int' argument. */
1617 case TARGET_SO_DEBUG:
1618 optname = SO_DEBUG;
1619 goto int_case;
1620 case TARGET_SO_REUSEADDR:
1621 optname = SO_REUSEADDR;
1622 goto int_case;
1623 case TARGET_SO_TYPE:
1624 optname = SO_TYPE;
1625 goto int_case;
1626 case TARGET_SO_ERROR:
1627 optname = SO_ERROR;
1628 goto int_case;
1629 case TARGET_SO_DONTROUTE:
1630 optname = SO_DONTROUTE;
1631 goto int_case;
1632 case TARGET_SO_BROADCAST:
1633 optname = SO_BROADCAST;
1634 goto int_case;
1635 case TARGET_SO_SNDBUF:
1636 optname = SO_SNDBUF;
1637 goto int_case;
1638 case TARGET_SO_RCVBUF:
1639 optname = SO_RCVBUF;
1640 goto int_case;
1641 case TARGET_SO_KEEPALIVE:
1642 optname = SO_KEEPALIVE;
1643 goto int_case;
1644 case TARGET_SO_OOBINLINE:
1645 optname = SO_OOBINLINE;
1646 goto int_case;
1647 case TARGET_SO_NO_CHECK:
1648 optname = SO_NO_CHECK;
1649 goto int_case;
1650 case TARGET_SO_PRIORITY:
1651 optname = SO_PRIORITY;
1652 goto int_case;
1653 #ifdef SO_BSDCOMPAT
1654 case TARGET_SO_BSDCOMPAT:
1655 optname = SO_BSDCOMPAT;
1656 goto int_case;
1657 #endif
1658 case TARGET_SO_PASSCRED:
1659 optname = SO_PASSCRED;
1660 goto int_case;
1661 case TARGET_SO_TIMESTAMP:
1662 optname = SO_TIMESTAMP;
1663 goto int_case;
1664 case TARGET_SO_RCVLOWAT:
1665 optname = SO_RCVLOWAT;
1666 goto int_case;
1667 default:
1668 goto int_case;
1669 }
1670 break;
1671 case SOL_TCP:
1672 /* TCP options all take an 'int' value. */
1673 int_case:
1674 if (get_user_u32(len, optlen))
1675 return -TARGET_EFAULT;
1676 if (len < 0)
1677 return -TARGET_EINVAL;
1678 lv = sizeof(lv);
1679 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1680 if (ret < 0)
1681 return ret;
1682 if (len > lv)
1683 len = lv;
1684 if (len == 4) {
1685 if (put_user_u32(val, optval_addr))
1686 return -TARGET_EFAULT;
1687 } else {
1688 if (put_user_u8(val, optval_addr))
1689 return -TARGET_EFAULT;
1690 }
1691 if (put_user_u32(len, optlen))
1692 return -TARGET_EFAULT;
1693 break;
1694 case SOL_IP:
1695 switch(optname) {
1696 case IP_TOS:
1697 case IP_TTL:
1698 case IP_HDRINCL:
1699 case IP_ROUTER_ALERT:
1700 case IP_RECVOPTS:
1701 case IP_RETOPTS:
1702 case IP_PKTINFO:
1703 case IP_MTU_DISCOVER:
1704 case IP_RECVERR:
1705 case IP_RECVTOS:
1706 #ifdef IP_FREEBIND
1707 case IP_FREEBIND:
1708 #endif
1709 case IP_MULTICAST_TTL:
1710 case IP_MULTICAST_LOOP:
1711 if (get_user_u32(len, optlen))
1712 return -TARGET_EFAULT;
1713 if (len < 0)
1714 return -TARGET_EINVAL;
1715 lv = sizeof(lv);
1716 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1717 if (ret < 0)
1718 return ret;
1719 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1720 len = 1;
1721 if (put_user_u32(len, optlen)
1722 || put_user_u8(val, optval_addr))
1723 return -TARGET_EFAULT;
1724 } else {
1725 if (len > sizeof(int))
1726 len = sizeof(int);
1727 if (put_user_u32(len, optlen)
1728 || put_user_u32(val, optval_addr))
1729 return -TARGET_EFAULT;
1730 }
1731 break;
1732 default:
1733 ret = -TARGET_ENOPROTOOPT;
1734 break;
1735 }
1736 break;
1737 default:
1738 unimplemented:
1739 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1740 level, optname);
1741 ret = -TARGET_EOPNOTSUPP;
1742 break;
1743 }
1744 return ret;
1745 }
1746
1747 /* FIXME
1748 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1749 * other lock functions have a return code of 0 for failure.
1750 */
1751 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1752 int count, int copy)
1753 {
1754 struct target_iovec *target_vec;
1755 abi_ulong base;
1756 int i;
1757
1758 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1759 if (!target_vec)
1760 return -TARGET_EFAULT;
1761 for(i = 0;i < count; i++) {
1762 base = tswapal(target_vec[i].iov_base);
1763 vec[i].iov_len = tswapal(target_vec[i].iov_len);
1764 if (vec[i].iov_len != 0) {
1765 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1766 /* Don't check the lock_user return value. We must call writev even
1767 if an element has an invalid base address. */
1768 } else {
1769 /* zero length pointer is ignored */
1770 vec[i].iov_base = NULL;
1771 }
1772 }
1773 unlock_user (target_vec, target_addr, 0);
1774 return 0;
1775 }
1776
1777 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1778 int count, int copy)
1779 {
1780 struct target_iovec *target_vec;
1781 abi_ulong base;
1782 int i;
1783
1784 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1785 if (!target_vec)
1786 return -TARGET_EFAULT;
1787 for(i = 0;i < count; i++) {
1788 if (target_vec[i].iov_base) {
1789 base = tswapal(target_vec[i].iov_base);
1790 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1791 }
1792 }
1793 unlock_user (target_vec, target_addr, 0);
1794
1795 return 0;
1796 }
1797
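/*
 * Typical use of the pair above (a sketch with error handling elided): a
 * readv-style emulation locks the guest buffers, runs the host syscall on
 * the host-side iovec, then unlocks with copy = 1 only when data flowed
 * toward the guest:
 *
 *     struct iovec *vec = alloca(count * sizeof(struct iovec));
 *     lock_iovec(VERIFY_WRITE, vec, target_vec_addr, count, 0);
 *     ret = get_errno(readv(fd, vec, count));
 *     unlock_iovec(vec, target_vec_addr, count, 1);
 *
 * (target_vec_addr and fd here are placeholder names.)
 */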
1798 /* do_socket() Must return target values and target errnos. */
1799 static abi_long do_socket(int domain, int type, int protocol)
1800 {
1801 #if defined(TARGET_MIPS)
1802 switch(type) {
1803 case TARGET_SOCK_DGRAM:
1804 type = SOCK_DGRAM;
1805 break;
1806 case TARGET_SOCK_STREAM:
1807 type = SOCK_STREAM;
1808 break;
1809 case TARGET_SOCK_RAW:
1810 type = SOCK_RAW;
1811 break;
1812 case TARGET_SOCK_RDM:
1813 type = SOCK_RDM;
1814 break;
1815 case TARGET_SOCK_SEQPACKET:
1816 type = SOCK_SEQPACKET;
1817 break;
1818 case TARGET_SOCK_PACKET:
1819 type = SOCK_PACKET;
1820 break;
1821 }
1822 #endif
1823 if (domain == PF_NETLINK)
1824 return -TARGET_EAFNOSUPPORT; /* NETLINK sockets are not yet supported */
1825 return get_errno(socket(domain, type, protocol));
1826 }
1827
1828 /* do_bind() Must return target values and target errnos. */
1829 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1830 socklen_t addrlen)
1831 {
1832 void *addr;
1833 abi_long ret;
1834
1835 if ((int)addrlen < 0) {
1836 return -TARGET_EINVAL;
1837 }
1838
1839 addr = alloca(addrlen+1);
1840
1841 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1842 if (ret)
1843 return ret;
1844
1845 return get_errno(bind(sockfd, addr, addrlen));
1846 }
1847
1848 /* do_connect() Must return target values and target errnos. */
1849 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1850 socklen_t addrlen)
1851 {
1852 void *addr;
1853 abi_long ret;
1854
1855 if ((int)addrlen < 0) {
1856 return -TARGET_EINVAL;
1857 }
1858
1859 addr = alloca(addrlen);
1860
1861 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1862 if (ret)
1863 return ret;
1864
1865 return get_errno(connect(sockfd, addr, addrlen));
1866 }
1867
1868 /* do_sendrecvmsg() Must return target values and target errnos. */
1869 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1870 int flags, int send)
1871 {
1872 abi_long ret, len;
1873 struct target_msghdr *msgp;
1874 struct msghdr msg;
1875 int count;
1876 struct iovec *vec;
1877 abi_ulong target_vec;
1878
1879 /* FIXME */
1880 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1881 msgp,
1882 target_msg,
1883 send ? 1 : 0))
1884 return -TARGET_EFAULT;
1885 if (msgp->msg_name) {
1886 msg.msg_namelen = tswap32(msgp->msg_namelen);
1887 msg.msg_name = alloca(msg.msg_namelen);
1888 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1889 msg.msg_namelen);
1890 if (ret) {
1891 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1892 return ret;
1893 }
1894 } else {
1895 msg.msg_name = NULL;
1896 msg.msg_namelen = 0;
1897 }
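/* Note: the control buffer is over-allocated by a factor of two,
 * presumably to leave room for host cmsg headers and alignment, which
 * can be larger than the target's when the layouts differ.
 */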
1898 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1899 msg.msg_control = alloca(msg.msg_controllen);
1900 msg.msg_flags = tswap32(msgp->msg_flags);
1901
1902 count = tswapal(msgp->msg_iovlen);
1903 vec = alloca(count * sizeof(struct iovec));
1904 target_vec = tswapal(msgp->msg_iov);
1905 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1906 msg.msg_iovlen = count;
1907 msg.msg_iov = vec;
1908
1909 if (send) {
1910 ret = target_to_host_cmsg(&msg, msgp);
1911 if (ret == 0)
1912 ret = get_errno(sendmsg(fd, &msg, flags));
1913 } else {
1914 ret = get_errno(recvmsg(fd, &msg, flags));
1915 if (!is_error(ret)) {
1916 len = ret;
1917 ret = host_to_target_cmsg(msgp, &msg);
1918 if (!is_error(ret)) {
1919 msgp->msg_namelen = tswap32(msg.msg_namelen);
1920 if (msg.msg_name != NULL) {
1921 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1922 msg.msg_name, msg.msg_namelen);
1923 if (ret) {
1924 goto out;
1925 }
1926 }
1927
1928 ret = len;
1929 }
1930 }
1931 }
1932
1933 out:
1934 unlock_iovec(vec, target_vec, count, !send);
1935 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1936 return ret;
1937 }
1938
1939 /* do_accept() Must return target values and target errnos. */
1940 static abi_long do_accept(int fd, abi_ulong target_addr,
1941 abi_ulong target_addrlen_addr)
1942 {
1943 socklen_t addrlen;
1944 void *addr;
1945 abi_long ret;
1946
1947 if (target_addr == 0)
1948 return get_errno(accept(fd, NULL, NULL));
1949
1950 /* linux returns EINVAL if addrlen pointer is invalid */
1951 if (get_user_u32(addrlen, target_addrlen_addr))
1952 return -TARGET_EINVAL;
1953
1954 if ((int)addrlen < 0) {
1955 return -TARGET_EINVAL;
1956 }
1957
1958 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1959 return -TARGET_EINVAL;
1960
1961 addr = alloca(addrlen);
1962
1963 ret = get_errno(accept(fd, addr, &addrlen));
1964 if (!is_error(ret)) {
1965 host_to_target_sockaddr(target_addr, addr, addrlen);
1966 if (put_user_u32(addrlen, target_addrlen_addr))
1967 ret = -TARGET_EFAULT;
1968 }
1969 return ret;
1970 }
1971
1972 /* do_getpeername() Must return target values and target errnos. */
1973 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1974 abi_ulong target_addrlen_addr)
1975 {
1976 socklen_t addrlen;
1977 void *addr;
1978 abi_long ret;
1979
1980 if (get_user_u32(addrlen, target_addrlen_addr))
1981 return -TARGET_EFAULT;
1982
1983 if ((int)addrlen < 0) {
1984 return -TARGET_EINVAL;
1985 }
1986
1987 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1988 return -TARGET_EFAULT;
1989
1990 addr = alloca(addrlen);
1991
1992 ret = get_errno(getpeername(fd, addr, &addrlen));
1993 if (!is_error(ret)) {
1994 host_to_target_sockaddr(target_addr, addr, addrlen);
1995 if (put_user_u32(addrlen, target_addrlen_addr))
1996 ret = -TARGET_EFAULT;
1997 }
1998 return ret;
1999 }
2000
2001 /* do_getsockname() Must return target values and target errnos. */
2002 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2003 abi_ulong target_addrlen_addr)
2004 {
2005 socklen_t addrlen;
2006 void *addr;
2007 abi_long ret;
2008
2009 if (get_user_u32(addrlen, target_addrlen_addr))
2010 return -TARGET_EFAULT;
2011
2012 if ((int)addrlen < 0) {
2013 return -TARGET_EINVAL;
2014 }
2015
2016 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2017 return -TARGET_EFAULT;
2018
2019 addr = alloca(addrlen);
2020
2021 ret = get_errno(getsockname(fd, addr, &addrlen));
2022 if (!is_error(ret)) {
2023 host_to_target_sockaddr(target_addr, addr, addrlen);
2024 if (put_user_u32(addrlen, target_addrlen_addr))
2025 ret = -TARGET_EFAULT;
2026 }
2027 return ret;
2028 }
2029
2030 /* do_socketpair() Must return target values and target errnos. */
2031 static abi_long do_socketpair(int domain, int type, int protocol,
2032 abi_ulong target_tab_addr)
2033 {
2034 int tab[2];
2035 abi_long ret;
2036
2037 ret = get_errno(socketpair(domain, type, protocol, tab));
2038 if (!is_error(ret)) {
2039 if (put_user_s32(tab[0], target_tab_addr)
2040 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2041 ret = -TARGET_EFAULT;
2042 }
2043 return ret;
2044 }
2045
2046 /* do_sendto() Must return target values and target errnos. */
2047 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2048 abi_ulong target_addr, socklen_t addrlen)
2049 {
2050 void *addr;
2051 void *host_msg;
2052 abi_long ret;
2053
2054 if ((int)addrlen < 0) {
2055 return -TARGET_EINVAL;
2056 }
2057
2058 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2059 if (!host_msg)
2060 return -TARGET_EFAULT;
2061 if (target_addr) {
2062 addr = alloca(addrlen);
2063 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2064 if (ret) {
2065 unlock_user(host_msg, msg, 0);
2066 return ret;
2067 }
2068 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2069 } else {
2070 ret = get_errno(send(fd, host_msg, len, flags));
2071 }
2072 unlock_user(host_msg, msg, 0);
2073 return ret;
2074 }
2075
2076 /* do_recvfrom() Must return target values and target errnos. */
2077 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2078 abi_ulong target_addr,
2079 abi_ulong target_addrlen)
2080 {
2081 socklen_t addrlen;
2082 void *addr;
2083 void *host_msg;
2084 abi_long ret;
2085
2086 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2087 if (!host_msg)
2088 return -TARGET_EFAULT;
2089 if (target_addr) {
2090 if (get_user_u32(addrlen, target_addrlen)) {
2091 ret = -TARGET_EFAULT;
2092 goto fail;
2093 }
2094 if ((int)addrlen < 0) {
2095 ret = -TARGET_EINVAL;
2096 goto fail;
2097 }
2098 addr = alloca(addrlen);
2099 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2100 } else {
2101 addr = NULL; /* To keep compiler quiet. */
2102 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2103 }
2104 if (!is_error(ret)) {
2105 if (target_addr) {
2106 host_to_target_sockaddr(target_addr, addr, addrlen);
2107 if (put_user_u32(addrlen, target_addrlen)) {
2108 ret = -TARGET_EFAULT;
2109 goto fail;
2110 }
2111 }
2112 unlock_user(host_msg, msg, len);
2113 } else {
2114 fail:
2115 unlock_user(host_msg, msg, 0);
2116 }
2117 return ret;
2118 }
2119
2120 #ifdef TARGET_NR_socketcall
2121 /* do_socketcall() Must return target values and target errnos. */
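/* socketcall(2) multiplexes all socket operations through one syscall:
 * num selects the operation and vptr points to an array of abi_ulong
 * arguments in guest memory, unpacked below with get_user_ual().
 */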
2122 static abi_long do_socketcall(int num, abi_ulong vptr)
2123 {
2124 abi_long ret;
2125 const int n = sizeof(abi_ulong);
2126
2127 switch(num) {
2128 case SOCKOP_socket:
2129 {
2130 abi_ulong domain, type, protocol;
2131
2132 if (get_user_ual(domain, vptr)
2133 || get_user_ual(type, vptr + n)
2134 || get_user_ual(protocol, vptr + 2 * n))
2135 return -TARGET_EFAULT;
2136
2137 ret = do_socket(domain, type, protocol);
2138 }
2139 break;
2140 case SOCKOP_bind:
2141 {
2142 abi_ulong sockfd;
2143 abi_ulong target_addr;
2144 socklen_t addrlen;
2145
2146 if (get_user_ual(sockfd, vptr)
2147 || get_user_ual(target_addr, vptr + n)
2148 || get_user_ual(addrlen, vptr + 2 * n))
2149 return -TARGET_EFAULT;
2150
2151 ret = do_bind(sockfd, target_addr, addrlen);
2152 }
2153 break;
2154 case SOCKOP_connect:
2155 {
2156 abi_ulong sockfd;
2157 abi_ulong target_addr;
2158 socklen_t addrlen;
2159
2160 if (get_user_ual(sockfd, vptr)
2161 || get_user_ual(target_addr, vptr + n)
2162 || get_user_ual(addrlen, vptr + 2 * n))
2163 return -TARGET_EFAULT;
2164
2165 ret = do_connect(sockfd, target_addr, addrlen);
2166 }
2167 break;
2168 case SOCKOP_listen:
2169 {
2170 abi_ulong sockfd, backlog;
2171
2172 if (get_user_ual(sockfd, vptr)
2173 || get_user_ual(backlog, vptr + n))
2174 return -TARGET_EFAULT;
2175
2176 ret = get_errno(listen(sockfd, backlog));
2177 }
2178 break;
2179 case SOCKOP_accept:
2180 {
2181 abi_ulong sockfd;
2182 abi_ulong target_addr, target_addrlen;
2183
2184 if (get_user_ual(sockfd, vptr)
2185 || get_user_ual(target_addr, vptr + n)
2186 || get_user_ual(target_addrlen, vptr + 2 * n))
2187 return -TARGET_EFAULT;
2188
2189 ret = do_accept(sockfd, target_addr, target_addrlen);
2190 }
2191 break;
2192 case SOCKOP_getsockname:
2193 {
2194 abi_ulong sockfd;
2195 abi_ulong target_addr, target_addrlen;
2196
2197 if (get_user_ual(sockfd, vptr)
2198 || get_user_ual(target_addr, vptr + n)
2199 || get_user_ual(target_addrlen, vptr + 2 * n))
2200 return -TARGET_EFAULT;
2201
2202 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2203 }
2204 break;
2205 case SOCKOP_getpeername:
2206 {
2207 abi_ulong sockfd;
2208 abi_ulong target_addr, target_addrlen;
2209
2210 if (get_user_ual(sockfd, vptr)
2211 || get_user_ual(target_addr, vptr + n)
2212 || get_user_ual(target_addrlen, vptr + 2 * n))
2213 return -TARGET_EFAULT;
2214
2215 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2216 }
2217 break;
2218 case SOCKOP_socketpair:
2219 {
2220 abi_ulong domain, type, protocol;
2221 abi_ulong tab;
2222
2223 if (get_user_ual(domain, vptr)
2224 || get_user_ual(type, vptr + n)
2225 || get_user_ual(protocol, vptr + 2 * n)
2226 || get_user_ual(tab, vptr + 3 * n))
2227 return -TARGET_EFAULT;
2228
2229 ret = do_socketpair(domain, type, protocol, tab);
2230 }
2231 break;
2232 case SOCKOP_send:
2233 {
2234 abi_ulong sockfd;
2235 abi_ulong msg;
2236 size_t len;
2237 abi_ulong flags;
2238
2239 if (get_user_ual(sockfd, vptr)
2240 || get_user_ual(msg, vptr + n)
2241 || get_user_ual(len, vptr + 2 * n)
2242 || get_user_ual(flags, vptr + 3 * n))
2243 return -TARGET_EFAULT;
2244
2245 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2246 }
2247 break;
2248 case SOCKOP_recv:
2249 {
2250 abi_ulong sockfd;
2251 abi_ulong msg;
2252 size_t len;
2253 abi_ulong flags;
2254
2255 if (get_user_ual(sockfd, vptr)
2256 || get_user_ual(msg, vptr + n)
2257 || get_user_ual(len, vptr + 2 * n)
2258 || get_user_ual(flags, vptr + 3 * n))
2259 return -TARGET_EFAULT;
2260
2261 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2262 }
2263 break;
2264 case SOCKOP_sendto:
2265 {
2266 abi_ulong sockfd;
2267 abi_ulong msg;
2268 size_t len;
2269 abi_ulong flags;
2270 abi_ulong addr;
2271 socklen_t addrlen;
2272
2273 if (get_user_ual(sockfd, vptr)
2274 || get_user_ual(msg, vptr + n)
2275 || get_user_ual(len, vptr + 2 * n)
2276 || get_user_ual(flags, vptr + 3 * n)
2277 || get_user_ual(addr, vptr + 4 * n)
2278 || get_user_ual(addrlen, vptr + 5 * n))
2279 return -TARGET_EFAULT;
2280
2281 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2282 }
2283 break;
2284 case SOCKOP_recvfrom:
2285 {
2286 abi_ulong sockfd;
2287 abi_ulong msg;
2288 size_t len;
2289 abi_ulong flags;
2290 abi_ulong addr;
2291 socklen_t addrlen;
2292
2293 if (get_user_ual(sockfd, vptr)
2294 || get_user_ual(msg, vptr + n)
2295 || get_user_ual(len, vptr + 2 * n)
2296 || get_user_ual(flags, vptr + 3 * n)
2297 || get_user_ual(addr, vptr + 4 * n)
2298 || get_user_ual(addrlen, vptr + 5 * n))
2299 return -TARGET_EFAULT;
2300
2301 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2302 }
2303 break;
2304 case SOCKOP_shutdown:
2305 {
2306 abi_ulong sockfd, how;
2307
2308 if (get_user_ual(sockfd, vptr)
2309 || get_user_ual(how, vptr + n))
2310 return -TARGET_EFAULT;
2311
2312 ret = get_errno(shutdown(sockfd, how));
2313 }
2314 break;
2315 case SOCKOP_sendmsg:
2316 case SOCKOP_recvmsg:
2317 {
2318 abi_ulong fd;
2319 abi_ulong target_msg;
2320 abi_ulong flags;
2321
2322 if (get_user_ual(fd, vptr)
2323 || get_user_ual(target_msg, vptr + n)
2324 || get_user_ual(flags, vptr + 2 * n))
2325 return -TARGET_EFAULT;
2326
2327 ret = do_sendrecvmsg(fd, target_msg, flags,
2328 (num == SOCKOP_sendmsg));
2329 }
2330 break;
2331 case SOCKOP_setsockopt:
2332 {
2333 abi_ulong sockfd;
2334 abi_ulong level;
2335 abi_ulong optname;
2336 abi_ulong optval;
2337 socklen_t optlen;
2338
2339 if (get_user_ual(sockfd, vptr)
2340 || get_user_ual(level, vptr + n)
2341 || get_user_ual(optname, vptr + 2 * n)
2342 || get_user_ual(optval, vptr + 3 * n)
2343 || get_user_ual(optlen, vptr + 4 * n))
2344 return -TARGET_EFAULT;
2345
2346 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2347 }
2348 break;
2349 case SOCKOP_getsockopt:
2350 {
2351 abi_ulong sockfd;
2352 abi_ulong level;
2353 abi_ulong optname;
2354 abi_ulong optval;
2355 socklen_t optlen;
2356
2357 if (get_user_ual(sockfd, vptr)
2358 || get_user_ual(level, vptr + n)
2359 || get_user_ual(optname, vptr + 2 * n)
2360 || get_user_ual(optval, vptr + 3 * n)
2361 || get_user_ual(optlen, vptr + 4 * n))
2362 return -TARGET_EFAULT;
2363
2364 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2365 }
2366 break;
2367 default:
2368 gemu_log("Unsupported socketcall: %d\n", num);
2369 ret = -TARGET_ENOSYS;
2370 break;
2371 }
2372 return ret;
2373 }
2374 #endif
2375
2376 #define N_SHM_REGIONS 32
2377
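/* Table of guest addresses and sizes of currently attached SysV shared
 * memory segments; do_shmat() records entries here and do_shmdt() uses
 * them to clear the page flags for the detached range.
 */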
2378 static struct shm_region {
2379 abi_ulong start;
2380 abi_ulong size;
2381 } shm_regions[N_SHM_REGIONS];
2382
2383 struct target_ipc_perm
2384 {
2385 abi_long __key;
2386 abi_ulong uid;
2387 abi_ulong gid;
2388 abi_ulong cuid;
2389 abi_ulong cgid;
2390 unsigned short int mode;
2391 unsigned short int __pad1;
2392 unsigned short int __seq;
2393 unsigned short int __pad2;
2394 abi_ulong __unused1;
2395 abi_ulong __unused2;
2396 };
2397
2398 struct target_semid_ds
2399 {
2400 struct target_ipc_perm sem_perm;
2401 abi_ulong sem_otime;
2402 abi_ulong __unused1;
2403 abi_ulong sem_ctime;
2404 abi_ulong __unused2;
2405 abi_ulong sem_nsems;
2406 abi_ulong __unused3;
2407 abi_ulong __unused4;
2408 };
2409
2410 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2411 abi_ulong target_addr)
2412 {
2413 struct target_ipc_perm *target_ip;
2414 struct target_semid_ds *target_sd;
2415
2416 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2417 return -TARGET_EFAULT;
2418 target_ip = &(target_sd->sem_perm);
2419 host_ip->__key = tswapal(target_ip->__key);
2420 host_ip->uid = tswapal(target_ip->uid);
2421 host_ip->gid = tswapal(target_ip->gid);
2422 host_ip->cuid = tswapal(target_ip->cuid);
2423 host_ip->cgid = tswapal(target_ip->cgid);
2424 host_ip->mode = tswap16(target_ip->mode);
2425 unlock_user_struct(target_sd, target_addr, 0);
2426 return 0;
2427 }
2428
2429 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2430 struct ipc_perm *host_ip)
2431 {
2432 struct target_ipc_perm *target_ip;
2433 struct target_semid_ds *target_sd;
2434
2435 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2436 return -TARGET_EFAULT;
2437 target_ip = &(target_sd->sem_perm);
2438 target_ip->__key = tswapal(host_ip->__key);
2439 target_ip->uid = tswapal(host_ip->uid);
2440 target_ip->gid = tswapal(host_ip->gid);
2441 target_ip->cuid = tswapal(host_ip->cuid);
2442 target_ip->cgid = tswapal(host_ip->cgid);
2443 target_ip->mode = tswap16(host_ip->mode);
2444 unlock_user_struct(target_sd, target_addr, 1);
2445 return 0;
2446 }
2447
2448 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2449 abi_ulong target_addr)
2450 {
2451 struct target_semid_ds *target_sd;
2452
2453 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2454 return -TARGET_EFAULT;
2455 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2456 return -TARGET_EFAULT;
2457 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2458 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2459 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2460 unlock_user_struct(target_sd, target_addr, 0);
2461 return 0;
2462 }
2463
2464 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2465 struct semid_ds *host_sd)
2466 {
2467 struct target_semid_ds *target_sd;
2468
2469 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2470 return -TARGET_EFAULT;
2471 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2472 return -TARGET_EFAULT;
2473 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2474 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2475 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2476 unlock_user_struct(target_sd, target_addr, 1);
2477 return 0;
2478 }
2479
2480 struct target_seminfo {
2481 int semmap;
2482 int semmni;
2483 int semmns;
2484 int semmnu;
2485 int semmsl;
2486 int semopm;
2487 int semume;
2488 int semusz;
2489 int semvmx;
2490 int semaem;
2491 };
2492
2493 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2494 struct seminfo *host_seminfo)
2495 {
2496 struct target_seminfo *target_seminfo;
2497 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2498 return -TARGET_EFAULT;
2499 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2500 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2501 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2502 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2503 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2504 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2505 __put_user(host_seminfo->semume, &target_seminfo->semume);
2506 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2507 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2508 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2509 unlock_user_struct(target_seminfo, target_addr, 1);
2510 return 0;
2511 }
2512
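/* glibc does not define union semun; semctl(2) requires the caller to
 * declare it, so both the host and the target layouts are given here.
 */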
2513 union semun {
2514 int val;
2515 struct semid_ds *buf;
2516 unsigned short *array;
2517 struct seminfo *__buf;
2518 };
2519
2520 union target_semun {
2521 int val;
2522 abi_ulong buf;
2523 abi_ulong array;
2524 abi_ulong __buf;
2525 };
2526
2527 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2528 abi_ulong target_addr)
2529 {
2530 int nsems;
2531 unsigned short *array;
2532 union semun semun;
2533 struct semid_ds semid_ds;
2534 int i, ret;
2535
2536 semun.buf = &semid_ds;
2537
2538 ret = semctl(semid, 0, IPC_STAT, semun);
2539 if (ret == -1)
2540 return get_errno(ret);
2541
2542 nsems = semid_ds.sem_nsems;
2543
2544 *host_array = malloc(nsems*sizeof(unsigned short));
2545 array = lock_user(VERIFY_READ, target_addr,
2546 nsems*sizeof(unsigned short), 1);
2547 if (!array)
2548 return -TARGET_EFAULT;
2549
2550 for(i=0; i<nsems; i++) {
2551 __get_user((*host_array)[i], &array[i]);
2552 }
2553 unlock_user(array, target_addr, 0);
2554
2555 return 0;
2556 }
2557
2558 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2559 unsigned short **host_array)
2560 {
2561 int nsems;
2562 unsigned short *array;
2563 union semun semun;
2564 struct semid_ds semid_ds;
2565 int i, ret;
2566
2567 semun.buf = &semid_ds;
2568
2569 ret = semctl(semid, 0, IPC_STAT, semun);
2570 if (ret == -1)
2571 return get_errno(ret);
2572
2573 nsems = semid_ds.sem_nsems;
2574
2575 array = lock_user(VERIFY_WRITE, target_addr,
2576 nsems*sizeof(unsigned short), 0);
2577 if (!array)
2578 return -TARGET_EFAULT;
2579
2580 for(i=0; i<nsems; i++) {
2581 __put_user((*host_array)[i], &array[i]);
2582 }
2583 free(*host_array);
2584 unlock_user(array, target_addr, 1);
2585
2586 return 0;
2587 }
2588
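/* semctl: convert the target_semun argument according to the command.
 * Only the low byte of cmd is used, so version bits such as IPC_64
 * that the guest may OR into cmd do not affect the switch below.
 */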
2589 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2590 union target_semun target_su)
2591 {
2592 union semun arg;
2593 struct semid_ds dsarg;
2594 unsigned short *array = NULL;
2595 struct seminfo seminfo;
2596 abi_long ret = -TARGET_EINVAL;
2597 abi_long err;
2598 cmd &= 0xff;
2599
2600 switch( cmd ) {
2601 case GETVAL:
2602 case SETVAL:
2603 arg.val = tswap32(target_su.val);
2604 ret = get_errno(semctl(semid, semnum, cmd, arg));
2605 target_su.val = tswap32(arg.val);
2606 break;
2607 case GETALL:
2608 case SETALL:
2609 err = target_to_host_semarray(semid, &array, target_su.array);
2610 if (err)
2611 return err;
2612 arg.array = array;
2613 ret = get_errno(semctl(semid, semnum, cmd, arg));
2614 err = host_to_target_semarray(semid, target_su.array, &array);
2615 if (err)
2616 return err;
2617 break;
2618 case IPC_STAT:
2619 case IPC_SET:
2620 case SEM_STAT:
2621 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2622 if (err)
2623 return err;
2624 arg.buf = &dsarg;
2625 ret = get_errno(semctl(semid, semnum, cmd, arg));
2626 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2627 if (err)
2628 return err;
2629 break;
2630 case IPC_INFO:
2631 case SEM_INFO:
2632 arg.__buf = &seminfo;
2633 ret = get_errno(semctl(semid, semnum, cmd, arg));
2634 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2635 if (err)
2636 return err;
2637 break;
2638 case IPC_RMID:
2639 case GETPID:
2640 case GETNCNT:
2641 case GETZCNT:
2642 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2643 break;
2644 }
2645
2646 return ret;
2647 }
2648
2649 struct target_sembuf {
2650 unsigned short sem_num;
2651 short sem_op;
2652 short sem_flg;
2653 };
2654
2655 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2656 abi_ulong target_addr,
2657 unsigned nsops)
2658 {
2659 struct target_sembuf *target_sembuf;
2660 int i;
2661
2662 target_sembuf = lock_user(VERIFY_READ, target_addr,
2663 nsops*sizeof(struct target_sembuf), 1);
2664 if (!target_sembuf)
2665 return -TARGET_EFAULT;
2666
2667 for(i=0; i<nsops; i++) {
2668 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2669 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2670 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2671 }
2672
2673 unlock_user(target_sembuf, target_addr, 0);
2674
2675 return 0;
2676 }
2677
2678 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2679 {
2680 struct sembuf sops[nsops];
2681
2682 if (target_to_host_sembuf(sops, ptr, nsops))
2683 return -TARGET_EFAULT;
2684
2685 return get_errno(semop(semid, sops, nsops));
2686 }
2687
2688 struct target_msqid_ds
2689 {
2690 struct target_ipc_perm msg_perm;
2691 abi_ulong msg_stime;
2692 #if TARGET_ABI_BITS == 32
2693 abi_ulong __unused1;
2694 #endif
2695 abi_ulong msg_rtime;
2696 #if TARGET_ABI_BITS == 32
2697 abi_ulong __unused2;
2698 #endif
2699 abi_ulong msg_ctime;
2700 #if TARGET_ABI_BITS == 32
2701 abi_ulong __unused3;
2702 #endif
2703 abi_ulong __msg_cbytes;
2704 abi_ulong msg_qnum;
2705 abi_ulong msg_qbytes;
2706 abi_ulong msg_lspid;
2707 abi_ulong msg_lrpid;
2708 abi_ulong __unused4;
2709 abi_ulong __unused5;
2710 };
2711
2712 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2713 abi_ulong target_addr)
2714 {
2715 struct target_msqid_ds *target_md;
2716
2717 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2718 return -TARGET_EFAULT;
2719 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2720 return -TARGET_EFAULT;
2721 host_md->msg_stime = tswapal(target_md->msg_stime);
2722 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2723 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2724 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2725 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2726 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2727 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2728 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2729 unlock_user_struct(target_md, target_addr, 0);
2730 return 0;
2731 }
2732
2733 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2734 struct msqid_ds *host_md)
2735 {
2736 struct target_msqid_ds *target_md;
2737
2738 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2739 return -TARGET_EFAULT;
2740 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2741 return -TARGET_EFAULT;
2742 target_md->msg_stime = tswapal(host_md->msg_stime);
2743 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2744 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2745 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2746 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2747 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2748 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2749 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2750 unlock_user_struct(target_md, target_addr, 1);
2751 return 0;
2752 }
2753
2754 struct target_msginfo {
2755 int msgpool;
2756 int msgmap;
2757 int msgmax;
2758 int msgmnb;
2759 int msgmni;
2760 int msgssz;
2761 int msgtql;
2762 unsigned short int msgseg;
2763 };
2764
2765 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2766 struct msginfo *host_msginfo)
2767 {
2768 struct target_msginfo *target_msginfo;
2769 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2770 return -TARGET_EFAULT;
2771 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2772 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2773 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2774 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2775 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2776 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2777 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2778 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2779 unlock_user_struct(target_msginfo, target_addr, 1);
2780 return 0;
2781 }
2782
2783 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2784 {
2785 struct msqid_ds dsarg;
2786 struct msginfo msginfo;
2787 abi_long ret = -TARGET_EINVAL;
2788
2789 cmd &= 0xff;
2790
2791 switch (cmd) {
2792 case IPC_STAT:
2793 case IPC_SET:
2794 case MSG_STAT:
2795 if (target_to_host_msqid_ds(&dsarg,ptr))
2796 return -TARGET_EFAULT;
2797 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2798 if (host_to_target_msqid_ds(ptr,&dsarg))
2799 return -TARGET_EFAULT;
2800 break;
2801 case IPC_RMID:
2802 ret = get_errno(msgctl(msgid, cmd, NULL));
2803 break;
2804 case IPC_INFO:
2805 case MSG_INFO:
2806 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2807 if (host_to_target_msginfo(ptr, &msginfo))
2808 return -TARGET_EFAULT;
2809 break;
2810 }
2811
2812 return ret;
2813 }
2814
2815 struct target_msgbuf {
2816 abi_long mtype;
2817 char mtext[1];
2818 };
2819
2820 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2821 unsigned int msgsz, int msgflg)
2822 {
2823 struct target_msgbuf *target_mb;
2824 struct msgbuf *host_mb;
2825 abi_long ret = 0;
2826
2827 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2828 return -TARGET_EFAULT;
2829 host_mb = malloc(msgsz+sizeof(long));
2830 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2831 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2832 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2833 free(host_mb);
2834 unlock_user_struct(target_mb, msgp, 0);
2835
2836 return ret;
2837 }
2838
2839 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2840 unsigned int msgsz, abi_long msgtyp,
2841 int msgflg)
2842 {
2843 struct target_msgbuf *target_mb;
2844 char *target_mtext;
2845 struct msgbuf *host_mb;
2846 abi_long ret = 0;
2847
2848 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2849 return -TARGET_EFAULT;
2850
2851 host_mb = g_malloc(msgsz+sizeof(long));
2852 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
2853
2854 if (ret > 0) {
2855 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2856 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2857 if (!target_mtext) {
2858 ret = -TARGET_EFAULT;
2859 goto end;
2860 }
2861 memcpy(target_mb->mtext, host_mb->mtext, ret);
2862 unlock_user(target_mtext, target_mtext_addr, ret);
2863 }
2864
2865 target_mb->mtype = tswapal(host_mb->mtype);
2866
2867 end:
2868 if (target_mb)
2869 unlock_user_struct(target_mb, msgp, 1);
2870 g_free(host_mb);
2871 return ret;
2872 }
2873
2874 struct target_shmid_ds
2875 {
2876 struct target_ipc_perm shm_perm;
2877 abi_ulong shm_segsz;
2878 abi_ulong shm_atime;
2879 #if TARGET_ABI_BITS == 32
2880 abi_ulong __unused1;
2881 #endif
2882 abi_ulong shm_dtime;
2883 #if TARGET_ABI_BITS == 32
2884 abi_ulong __unused2;
2885 #endif
2886 abi_ulong shm_ctime;
2887 #if TARGET_ABI_BITS == 32
2888 abi_ulong __unused3;
2889 #endif
2890 int shm_cpid;
2891 int shm_lpid;
2892 abi_ulong shm_nattch;
2893 unsigned long int __unused4;
2894 unsigned long int __unused5;
2895 };
2896
2897 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2898 abi_ulong target_addr)
2899 {
2900 struct target_shmid_ds *target_sd;
2901
2902 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2903 return -TARGET_EFAULT;
2904 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2905 return -TARGET_EFAULT;
2906 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2907 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2908 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2909 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2910 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2911 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2912 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2913 unlock_user_struct(target_sd, target_addr, 0);
2914 return 0;
2915 }
2916
2917 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2918 struct shmid_ds *host_sd)
2919 {
2920 struct target_shmid_ds *target_sd;
2921
2922 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2923 return -TARGET_EFAULT;
2924 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2925 return -TARGET_EFAULT;
2926 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2927 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2928 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2929 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2930 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2931 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2932 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2933 unlock_user_struct(target_sd, target_addr, 1);
2934 return 0;
2935 }
2936
2937 struct target_shminfo {
2938 abi_ulong shmmax;
2939 abi_ulong shmmin;
2940 abi_ulong shmmni;
2941 abi_ulong shmseg;
2942 abi_ulong shmall;
2943 };
2944
2945 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2946 struct shminfo *host_shminfo)
2947 {
2948 struct target_shminfo *target_shminfo;
2949 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2950 return -TARGET_EFAULT;
2951 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2952 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2953 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2954 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2955 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2956 unlock_user_struct(target_shminfo, target_addr, 1);
2957 return 0;
2958 }
2959
2960 struct target_shm_info {
2961 int used_ids;
2962 abi_ulong shm_tot;
2963 abi_ulong shm_rss;
2964 abi_ulong shm_swp;
2965 abi_ulong swap_attempts;
2966 abi_ulong swap_successes;
2967 };
2968
2969 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2970 struct shm_info *host_shm_info)
2971 {
2972 struct target_shm_info *target_shm_info;
2973 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2974 return -TARGET_EFAULT;
2975 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2976 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2977 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2978 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2979 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2980 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2981 unlock_user_struct(target_shm_info, target_addr, 1);
2982 return 0;
2983 }
2984
2985 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2986 {
2987 struct shmid_ds dsarg;
2988 struct shminfo shminfo;
2989 struct shm_info shm_info;
2990 abi_long ret = -TARGET_EINVAL;
2991
2992 cmd &= 0xff;
2993
2994 switch(cmd) {
2995 case IPC_STAT:
2996 case IPC_SET:
2997 case SHM_STAT:
2998 if (target_to_host_shmid_ds(&dsarg, buf))
2999 return -TARGET_EFAULT;
3000 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3001 if (host_to_target_shmid_ds(buf, &dsarg))
3002 return -TARGET_EFAULT;
3003 break;
3004 case IPC_INFO:
3005 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3006 if (host_to_target_shminfo(buf, &shminfo))
3007 return -TARGET_EFAULT;
3008 break;
3009 case SHM_INFO:
3010 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3011 if (host_to_target_shm_info(buf, &shm_info))
3012 return -TARGET_EFAULT;
3013 break;
3014 case IPC_RMID:
3015 case SHM_LOCK:
3016 case SHM_UNLOCK:
3017 ret = get_errno(shmctl(shmid, cmd, NULL));
3018 break;
3019 }
3020
3021 return ret;
3022 }
3023
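/* Attach a SysV shared memory segment into the guest address space.
 * When the guest does not supply an address, mmap_find_vma() picks a
 * suitable range; in either case the mapping is recorded in
 * shm_regions[] so that do_shmdt() can later clear its page flags.
 */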
3024 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3025 {
3026 abi_long raddr;
3027 void *host_raddr;
3028 struct shmid_ds shm_info;
3029 int i,ret;
3030
3031 /* find out the length of the shared memory segment */
3032 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3033 if (is_error(ret)) {
3034 /* can't get length, bail out */
3035 return ret;
3036 }
3037
3038 mmap_lock();
3039
3040 if (shmaddr)
3041 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3042 else {
3043 abi_ulong mmap_start;
3044
3045 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3046
3047 if (mmap_start == -1) {
3048 errno = ENOMEM;
3049 host_raddr = (void *)-1;
3050 } else
3051 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3052 }
3053
3054 if (host_raddr == (void *)-1) {
3055 mmap_unlock();
3056 return get_errno((long)host_raddr);
3057 }
3058 raddr=h2g((unsigned long)host_raddr);
3059
3060 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3061 PAGE_VALID | PAGE_READ |
3062 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3063
3064 for (i = 0; i < N_SHM_REGIONS; i++) {
3065 if (shm_regions[i].start == 0) {
3066 shm_regions[i].start = raddr;
3067 shm_regions[i].size = shm_info.shm_segsz;
3068 break;
3069 }
3070 }
3071
3072 mmap_unlock();
3073 return raddr;
3074
3075 }
3076
3077 static inline abi_long do_shmdt(abi_ulong shmaddr)
3078 {
3079 int i;
3080
3081 for (i = 0; i < N_SHM_REGIONS; ++i) {
3082 if (shm_regions[i].start == shmaddr) {
3083 shm_regions[i].start = 0;
3084 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3085 break;
3086 }
3087 }
3088
3089 return get_errno(shmdt(g2h(shmaddr)));
3090 }
3091
3092 #ifdef TARGET_NR_ipc
3093 /* ??? This only works with linear mappings. */
3094 /* do_ipc() must return target values and target errnos. */
3095 static abi_long do_ipc(unsigned int call, int first,
3096 int second, int third,
3097 abi_long ptr, abi_long fifth)
3098 {
3099 int version;
3100 abi_long ret = 0;
3101
3102 version = call >> 16;
3103 call &= 0xffff;
3104
3105 switch (call) {
3106 case IPCOP_semop:
3107 ret = do_semop(first, ptr, second);
3108 break;
3109
3110 case IPCOP_semget:
3111 ret = get_errno(semget(first, second, third));
3112 break;
3113
3114 case IPCOP_semctl:
3115 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3116 break;
3117
3118 case IPCOP_msgget:
3119 ret = get_errno(msgget(first, second));
3120 break;
3121
3122 case IPCOP_msgsnd:
3123 ret = do_msgsnd(first, ptr, second, third);
3124 break;
3125
3126 case IPCOP_msgctl:
3127 ret = do_msgctl(first, second, ptr);
3128 break;
3129
3130 case IPCOP_msgrcv:
3131 switch (version) {
3132 case 0:
3133 {
3134 struct target_ipc_kludge {
3135 abi_long msgp;
3136 abi_long msgtyp;
3137 } *tmp;
3138
3139 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3140 ret = -TARGET_EFAULT;
3141 break;
3142 }
3143
3144 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3145
3146 unlock_user_struct(tmp, ptr, 0);
3147 break;
3148 }
3149 default:
3150 ret = do_msgrcv(first, ptr, second, fifth, third);
3151 }
3152 break;
3153
3154 case IPCOP_shmat:
3155 switch (version) {
3156 default:
3157 {
3158 abi_ulong raddr;
3159 raddr = do_shmat(first, ptr, second);
3160 if (is_error(raddr))
3161 return get_errno(raddr);
3162 if (put_user_ual(raddr, third))
3163 return -TARGET_EFAULT;
3164 break;
3165 }
3166 case 1:
3167 ret = -TARGET_EINVAL;
3168 break;
3169 }
3170 break;
3171 case IPCOP_shmdt:
3172 ret = do_shmdt(ptr);
3173 break;
3174
3175 case IPCOP_shmget:
3176 /* IPC_* flag values are the same on all linux platforms */
3177 ret = get_errno(shmget(first, second, third));
3178 break;
3179
3180 /* IPC_* and SHM_* command values are the same on all linux platforms */
3181 case IPCOP_shmctl:
3182 ret = do_shmctl(first, second, third);
3183 break;
3184 default:
3185 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3186 ret = -TARGET_ENOSYS;
3187 break;
3188 }
3189 return ret;
3190 }
3191 #endif
3192
3193 /* kernel structure types definitions */
3194
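/* syscall_types.h is included twice (an X-macro pattern): the first
 * pass builds an enum of STRUCT_* identifiers, the second builds the
 * argtype descriptor array struct_<name>_def[] for each structure.
 */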
3195 #define STRUCT(name, ...) STRUCT_ ## name,
3196 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3197 enum {
3198 #include "syscall_types.h"
3199 };
3200 #undef STRUCT
3201 #undef STRUCT_SPECIAL
3202
3203 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3204 #define STRUCT_SPECIAL(name)
3205 #include "syscall_types.h"
3206 #undef STRUCT
3207 #undef STRUCT_SPECIAL
3208
3209 typedef struct IOCTLEntry IOCTLEntry;
3210
3211 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3212 int fd, abi_long cmd, abi_long arg);
3213
3214 struct IOCTLEntry {
3215 unsigned int target_cmd;
3216 unsigned int host_cmd;
3217 const char *name;
3218 int access;
3219 do_ioctl_fn *do_ioctl;
3220 const argtype arg_type[5];
3221 };
3222
3223 #define IOC_R 0x0001
3224 #define IOC_W 0x0002
3225 #define IOC_RW (IOC_R | IOC_W)
3226
3227 #define MAX_STRUCT_SIZE 4096
3228
3229 #ifdef CONFIG_FIEMAP
3230 /* So fiemap access checks don't overflow on 32 bit systems.
3231 * This is very slightly smaller than the limit imposed by
3232 * the underlying kernel.
3233 */
3234 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3235 / sizeof(struct fiemap_extent))
3236
3237 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3238 int fd, abi_long cmd, abi_long arg)
3239 {
3240 /* The parameter for this ioctl is a struct fiemap followed
3241 * by an array of struct fiemap_extent whose size is set
3242 * in fiemap->fm_extent_count. The array is filled in by the
3243 * ioctl.
3244 */
3245 int target_size_in, target_size_out;
3246 struct fiemap *fm;
3247 const argtype *arg_type = ie->arg_type;
3248 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3249 void *argptr, *p;
3250 abi_long ret;
3251 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3252 uint32_t outbufsz;
3253 int free_fm = 0;
3254
3255 assert(arg_type[0] == TYPE_PTR);
3256 assert(ie->access == IOC_RW);
3257 arg_type++;
3258 target_size_in = thunk_type_size(arg_type, 0);
3259 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3260 if (!argptr) {
3261 return -TARGET_EFAULT;
3262 }
3263 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3264 unlock_user(argptr, arg, 0);
3265 fm = (struct fiemap *)buf_temp;
3266 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3267 return -TARGET_EINVAL;
3268 }
3269
3270 outbufsz = sizeof (*fm) +
3271 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3272
3273 if (outbufsz > MAX_STRUCT_SIZE) {
3274 /* We can't fit all the extents into the fixed size buffer.
3275 * Allocate one that is large enough and use it instead.
3276 */
3277 fm = malloc(outbufsz);
3278 if (!fm) {
3279 return -TARGET_ENOMEM;
3280 }
3281 memcpy(fm, buf_temp, sizeof(struct fiemap));
3282 free_fm = 1;
3283 }
3284 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3285 if (!is_error(ret)) {
3286 target_size_out = target_size_in;
3287 /* An extent_count of 0 means we were only counting the extents
3288 * so there are no structs to copy
3289 */
3290 if (fm->fm_extent_count != 0) {
3291 target_size_out += fm->fm_mapped_extents * extent_size;
3292 }
3293 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3294 if (!argptr) {
3295 ret = -TARGET_EFAULT;
3296 } else {
3297 /* Convert the struct fiemap */
3298 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3299 if (fm->fm_extent_count != 0) {
3300 p = argptr + target_size_in;
3301 /* ...and then all the struct fiemap_extents */
3302 for (i = 0; i < fm->fm_mapped_extents; i++) {
3303 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3304 THUNK_TARGET);
3305 p += extent_size;
3306 }
3307 }
3308 unlock_user(argptr, arg, target_size_out);
3309 }
3310 }
3311 if (free_fm) {
3312 free(fm);
3313 }
3314 return ret;
3315 }
3316 #endif
3317
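/* SIOCGIFCONF: struct ifreq differs in size between target and host,
 * so the ifc_len values and each array element are converted
 * individually rather than copied as a block.
 */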
3318 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3319 int fd, abi_long cmd, abi_long arg)
3320 {
3321 const argtype *arg_type = ie->arg_type;
3322 int target_size;
3323 void *argptr;
3324 int ret;
3325 struct ifconf *host_ifconf;
3326 uint32_t outbufsz;
3327 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3328 int target_ifreq_size;
3329 int nb_ifreq;
3330 int free_buf = 0;
3331 int i;
3332 int target_ifc_len;
3333 abi_long target_ifc_buf;
3334 int host_ifc_len;
3335 char *host_ifc_buf;
3336
3337 assert(arg_type[0] == TYPE_PTR);
3338 assert(ie->access == IOC_RW);
3339
3340 arg_type++;
3341 target_size = thunk_type_size(arg_type, 0);
3342
3343 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3344 if (!argptr)
3345 return -TARGET_EFAULT;
3346 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3347 unlock_user(argptr, arg, 0);
3348
3349 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3350 target_ifc_len = host_ifconf->ifc_len;
3351 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3352
3353 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3354 nb_ifreq = target_ifc_len / target_ifreq_size;
3355 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3356
3357 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3358 if (outbufsz > MAX_STRUCT_SIZE) {
3359 /* We can't fit all the ifreq entries into the fixed size buffer.
3360 * Allocate one that is large enough and use it instead.
3361 */
3362 host_ifconf = malloc(outbufsz);
3363 if (!host_ifconf) {
3364 return -TARGET_ENOMEM;
3365 }
3366 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3367 free_buf = 1;
3368 }
3369 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3370
3371 host_ifconf->ifc_len = host_ifc_len;
3372 host_ifconf->ifc_buf = host_ifc_buf;
3373
3374 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3375 if (!is_error(ret)) {
3376 /* convert host ifc_len to target ifc_len */
3377
3378 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3379 target_ifc_len = nb_ifreq * target_ifreq_size;
3380 host_ifconf->ifc_len = target_ifc_len;
3381
3382 /* restore target ifc_buf */
3383
3384 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3385
3386 /* copy struct ifconf to target user */
3387
3388 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3389 if (!argptr)
3390 return -TARGET_EFAULT;
3391 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3392 unlock_user(argptr, arg, target_size);
3393
3394 /* copy ifreq[] to target user */
3395
3396 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3397 for (i = 0; i < nb_ifreq ; i++) {
3398 thunk_convert(argptr + i * target_ifreq_size,
3399 host_ifc_buf + i * sizeof(struct ifreq),
3400 ifreq_arg_type, THUNK_TARGET);
3401 }
3402 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3403 }
3404
3405 if (free_buf) {
3406 free(host_ifconf);
3407 }
3408
3409 return ret;
3410 }
3411
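/* Device-mapper ioctls carry a variable-sized payload after the fixed
 * struct dm_ioctl header, located via data_start/data_size, so each
 * command's payload is converted separately in both directions.
 */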
3412 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3413 abi_long cmd, abi_long arg)
3414 {
3415 void *argptr;
3416 struct dm_ioctl *host_dm;
3417 abi_long guest_data;
3418 uint32_t guest_data_size;
3419 int target_size;
3420 const argtype *arg_type = ie->arg_type;
3421 abi_long ret;
3422 void *big_buf = NULL;
3423 char *host_data;
3424
3425 arg_type++;
3426 target_size = thunk_type_size(arg_type, 0);
3427 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3428 if (!argptr) {
3429 ret = -TARGET_EFAULT;
3430 goto out;
3431 }
3432 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3433 unlock_user(argptr, arg, 0);
3434
3435 /* buf_temp is too small, so fetch things into a bigger buffer */
3436 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3437 memcpy(big_buf, buf_temp, target_size);
3438 buf_temp = big_buf;
3439 host_dm = big_buf;
3440
3441 guest_data = arg + host_dm->data_start;
3442 if ((guest_data - arg) < 0) {
3443 ret = -EINVAL;
3444 goto out;
3445 }
3446 guest_data_size = host_dm->data_size - host_dm->data_start;
3447 host_data = (char*)host_dm + host_dm->data_start;
3448
3449 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3450 switch (ie->host_cmd) {
3451 case DM_REMOVE_ALL:
3452 case DM_LIST_DEVICES:
3453 case DM_DEV_CREATE:
3454 case DM_DEV_REMOVE:
3455 case DM_DEV_SUSPEND:
3456 case DM_DEV_STATUS:
3457 case DM_DEV_WAIT:
3458 case DM_TABLE_STATUS:
3459 case DM_TABLE_CLEAR:
3460 case DM_TABLE_DEPS:
3461 case DM_LIST_VERSIONS:
3462 /* no input data */
3463 break;
3464 case DM_DEV_RENAME:
3465 case DM_DEV_SET_GEOMETRY:
3466 /* data contains only strings */
3467 memcpy(host_data, argptr, guest_data_size);
3468 break;
3469 case DM_TARGET_MSG:
3470 memcpy(host_data, argptr, guest_data_size);
3471 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3472 break;
3473 case DM_TABLE_LOAD:
3474 {
3475 void *gspec = argptr;
3476 void *cur_data = host_data;
3477 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3478 int spec_size = thunk_type_size(arg_type, 0);
3479 int i;
3480
3481 for (i = 0; i < host_dm->target_count; i++) {
3482 struct dm_target_spec *spec = cur_data;
3483 uint32_t next;
3484 int slen;
3485
3486 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3487 slen = strlen((char*)gspec + spec_size) + 1;
3488 next = spec->next;
3489 spec->next = sizeof(*spec) + slen;
3490 strcpy((char*)&spec[1], gspec + spec_size);
3491 gspec += next;
3492 cur_data += spec->next;
3493 }
3494 break;
3495 }
3496 default:
3497 ret = -TARGET_EINVAL;
3498 goto out;
3499 }
3500 unlock_user(argptr, guest_data, 0);
3501
3502 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3503 if (!is_error(ret)) {
3504 guest_data = arg + host_dm->data_start;
3505 guest_data_size = host_dm->data_size - host_dm->data_start;
3506 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3507 switch (ie->host_cmd) {
3508 case DM_REMOVE_ALL:
3509 case DM_DEV_CREATE:
3510 case DM_DEV_REMOVE:
3511 case DM_DEV_RENAME:
3512 case DM_DEV_SUSPEND:
3513 case DM_DEV_STATUS:
3514 case DM_TABLE_LOAD:
3515 case DM_TABLE_CLEAR:
3516 case DM_TARGET_MSG:
3517 case DM_DEV_SET_GEOMETRY:
3518 /* no return data */
3519 break;
3520 case DM_LIST_DEVICES:
3521 {
3522 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3523 uint32_t remaining_data = guest_data_size;
3524 void *cur_data = argptr;
3525 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3526 int nl_size = 12; /* can't use thunk_type_size() due to alignment */
3527
3528 while (1) {
3529 uint32_t next = nl->next;
3530 if (next) {
3531 nl->next = nl_size + (strlen(nl->name) + 1);
3532 }
3533 if (remaining_data < nl->next) {
3534 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3535 break;
3536 }
3537 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3538 strcpy(cur_data + nl_size, nl->name);
3539 cur_data += nl->next;
3540 remaining_data -= nl->next;
3541 if (!next) {
3542 break;
3543 }
3544 nl = (void*)nl + next;
3545 }
3546 break;
3547 }
3548 case DM_DEV_WAIT:
3549 case DM_TABLE_STATUS:
3550 {
3551 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3552 void *cur_data = argptr;
3553 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3554 int spec_size = thunk_type_size(arg_type, 0);
3555 int i;
3556
3557 for (i = 0; i < host_dm->target_count; i++) {
3558 uint32_t next = spec->next;
3559 int slen = strlen((char*)&spec[1]) + 1;
3560 spec->next = (cur_data - argptr) + spec_size + slen;
3561 if (guest_data_size < spec->next) {
3562 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3563 break;
3564 }
3565 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3566 strcpy(cur_data + spec_size, (char*)&spec[1]);
3567 cur_data = argptr + spec->next;
3568 spec = (void*)host_dm + host_dm->data_start + next;
3569 }
3570 break;
3571 }
3572 case DM_TABLE_DEPS:
3573 {
3574 void *hdata = (void*)host_dm + host_dm->data_start;
3575 int count = *(uint32_t*)hdata;
3576 uint64_t *hdev = hdata + 8;
3577 uint64_t *gdev = argptr + 8;
3578 int i;
3579
3580 *(uint32_t*)argptr = tswap32(count);
3581 for (i = 0; i < count; i++) {
3582 *gdev = tswap64(*hdev);
3583 gdev++;
3584 hdev++;
3585 }
3586 break;
3587 }
3588 case DM_LIST_VERSIONS:
3589 {
3590 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3591 uint32_t remaining_data = guest_data_size;
3592 void *cur_data = argptr;
3593 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3594 int vers_size = thunk_type_size(arg_type, 0);
3595
3596 while (1) {
3597 uint32_t next = vers->next;
3598 if (next) {
3599 vers->next = vers_size + (strlen(vers->name) + 1);
3600 }
3601 if (remaining_data < vers->next) {
3602 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3603 break;
3604 }
3605 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3606 strcpy(cur_data + vers_size, vers->name);
3607 cur_data += vers->next;
3608 remaining_data -= vers->next;
3609 if (!next) {
3610 break;
3611 }
3612 vers = (void*)vers + next;
3613 }
3614 break;
3615 }
3616 default:
3617 ret = -TARGET_EINVAL;
3618 goto out;
3619 }
3620 unlock_user(argptr, guest_data, guest_data_size);
3621
3622 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3623 if (!argptr) {
3624 ret = -TARGET_EFAULT;
3625 goto out;
3626 }
3627 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3628 unlock_user(argptr, arg, target_size);
3629 }
3630 out:
3631 if (big_buf) {
3632 free(big_buf);
3633 }
3634 return ret;
3635 }
3636
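/* The ioctl translation table is generated from ioctls.h: IOCTL()
 * entries are converted generically from their argtype description,
 * while IOCTL_SPECIAL() entries supply a custom do_ioctl_fn handler.
 */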
3637 static IOCTLEntry ioctl_entries[] = {
3638 #define IOCTL(cmd, access, ...) \
3639 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3640 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3641 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3642 #include "ioctls.h"
3643 { 0, 0, },
3644 };
3645
3646 /* ??? Implement proper locking for ioctls. */
3647 /* do_ioctl() Must return target values and target errnos. */
3648 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3649 {
3650 const IOCTLEntry *ie;
3651 const argtype *arg_type;
3652 abi_long ret;
3653 uint8_t buf_temp[MAX_STRUCT_SIZE];
3654 int target_size;
3655 void *argptr;
3656
3657 ie = ioctl_entries;
3658 for(;;) {
3659 if (ie->target_cmd == 0) {
3660 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3661 return -TARGET_ENOSYS;
3662 }
3663 if (ie->target_cmd == cmd)
3664 break;
3665 ie++;
3666 }
3667 arg_type = ie->arg_type;
3668 #if defined(DEBUG)
3669 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3670 #endif
3671 if (ie->do_ioctl) {
3672 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3673 }
3674
3675 switch(arg_type[0]) {
3676 case TYPE_NULL:
3677 /* no argument */
3678 ret = get_errno(ioctl(fd, ie->host_cmd));
3679 break;
3680 case TYPE_PTRVOID:
3681 case TYPE_INT:
3682 /* int argument */
3683 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3684 break;
3685 case TYPE_PTR:
3686 arg_type++;
3687 target_size = thunk_type_size(arg_type, 0);
3688 switch(ie->access) {
3689 case IOC_R:
3690 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3691 if (!is_error(ret)) {
3692 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3693 if (!argptr)
3694 return -TARGET_EFAULT;
3695 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3696 unlock_user(argptr, arg, target_size);
3697 }
3698 break;
3699 case IOC_W:
3700 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3701 if (!argptr)
3702 return -TARGET_EFAULT;
3703 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3704 unlock_user(argptr, arg, 0);
3705 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3706 break;
3707 default:
3708 case IOC_RW:
3709 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3710 if (!argptr)
3711 return -TARGET_EFAULT;
3712 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3713 unlock_user(argptr, arg, 0);
3714 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3715 if (!is_error(ret)) {
3716 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3717 if (!argptr)
3718 return -TARGET_EFAULT;
3719 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3720 unlock_user(argptr, arg, target_size);
3721 }
3722 break;
3723 }
3724 break;
3725 default:
3726 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3727 (long)cmd, arg_type[0]);
3728 ret = -TARGET_ENOSYS;
3729 break;
3730 }
3731 return ret;
3732 }
3733
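/* termios flag translation tables: each entry maps a field of target
 * bits (target mask, target value) to the corresponding host bits
 * (host mask, host value).
 */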
3734 static const bitmask_transtbl iflag_tbl[] = {
3735 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3736 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3737 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3738 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3739 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3740 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3741 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3742 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3743 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3744 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3745 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3746 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3747 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3748 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3749 { 0, 0, 0, 0 }
3750 };
3751
3752 static const bitmask_transtbl oflag_tbl[] = {
3753 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3754 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3755 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3756 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3757 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3758 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3759 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3760 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3761 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3762 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3763 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3764 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3765 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3766 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3767 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3768 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3769 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3770 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3771 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3772 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3773 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3774 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3775 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3776 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3777 { 0, 0, 0, 0 }
3778 };
3779
3780 static const bitmask_transtbl cflag_tbl[] = {
3781 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3782 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3783 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3784 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3785 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3786 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3787 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3788 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3789 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3790 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3791 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3792 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3793 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3794 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3795 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3796 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3797 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3798 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3799 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3800 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3801 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3802 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3803 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3804 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3805 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3806 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3807 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3808 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3809 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3810 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3811 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3812 { 0, 0, 0, 0 }
3813 };
3814
3815 static const bitmask_transtbl lflag_tbl[] = {
3816 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3817 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3818 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3819 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3820 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3821 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3822 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3823 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3824 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3825 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3826 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3827 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3828 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3829 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3830 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3831 { 0, 0, 0, 0 }
3832 };
3833
3834 static void target_to_host_termios (void *dst, const void *src)
3835 {
3836 struct host_termios *host = dst;
3837 const struct target_termios *target = src;
3838
3839 host->c_iflag =
3840 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3841 host->c_oflag =
3842 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3843 host->c_cflag =
3844 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3845 host->c_lflag =
3846 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3847 host->c_line = target->c_line;
3848
3849 memset(host->c_cc, 0, sizeof(host->c_cc));
3850 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3851 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3852 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3853 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3854 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3855 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3856 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3857 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3858 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3859 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3860 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3861 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3862 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3863 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3864 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3865 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3866 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3867 }
3868
3869 static void host_to_target_termios (void *dst, const void *src)
3870 {
3871 struct target_termios *target = dst;
3872 const struct host_termios *host = src;
3873
3874 target->c_iflag =
3875 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3876 target->c_oflag =
3877 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3878 target->c_cflag =
3879 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3880 target->c_lflag =
3881 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3882 target->c_line = host->c_line;
3883
3884 memset(target->c_cc, 0, sizeof(target->c_cc));
3885 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3886 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3887 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3888 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3889 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3890 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3891 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3892 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3893 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3894 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3895 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3896 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3897 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3898 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3899 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3900 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3901 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3902 }
3903
3904 static const StructEntry struct_termios_def = {
3905 .convert = { host_to_target_termios, target_to_host_termios },
3906 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3907 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3908 };
3909
3910 static bitmask_transtbl mmap_flags_tbl[] = {
3911 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3912 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3913 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3914 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3915 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3916 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3917 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3918 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3919 { 0, 0, 0, 0 }
3920 };
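/* Each entry in these translation tables is { target_mask, target_bits,
   host_mask, host_bits }: the target_to_host_bitmask()/host_to_target_bitmask()
   helpers (defined earlier in this file) test the source value under the mask
   and, when it matches the given bits, OR in the bits for the other side. */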
3921
3922 #if defined(TARGET_I386)
3923
3924 /* NOTE: there is really only one LDT, shared by all the threads */
3925 static uint8_t *ldt_table;
3926
3927 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3928 {
3929 int size;
3930 void *p;
3931
3932 if (!ldt_table)
3933 return 0;
3934 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3935 if (size > bytecount)
3936 size = bytecount;
3937 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3938 if (!p)
3939 return -TARGET_EFAULT;
3940 /* ??? Should this be byteswapped? */
3941 memcpy(p, ldt_table, size);
3942 unlock_user(p, ptr, size);
3943 return size;
3944 }
3945
3946 /* XXX: add locking support */
3947 static abi_long write_ldt(CPUX86State *env,
3948 abi_ulong ptr, unsigned long bytecount, int oldmode)
3949 {
3950 struct target_modify_ldt_ldt_s ldt_info;
3951 struct target_modify_ldt_ldt_s *target_ldt_info;
3952 int seg_32bit, contents, read_exec_only, limit_in_pages;
3953 int seg_not_present, useable, lm;
3954 uint32_t *lp, entry_1, entry_2;
3955
3956 if (bytecount != sizeof(ldt_info))
3957 return -TARGET_EINVAL;
3958 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3959 return -TARGET_EFAULT;
3960 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3961 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3962 ldt_info.limit = tswap32(target_ldt_info->limit);
3963 ldt_info.flags = tswap32(target_ldt_info->flags);
3964 unlock_user_struct(target_ldt_info, ptr, 0);
3965
3966 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3967 return -TARGET_EINVAL;
3968 seg_32bit = ldt_info.flags & 1;
3969 contents = (ldt_info.flags >> 1) & 3;
3970 read_exec_only = (ldt_info.flags >> 3) & 1;
3971 limit_in_pages = (ldt_info.flags >> 4) & 1;
3972 seg_not_present = (ldt_info.flags >> 5) & 1;
3973 useable = (ldt_info.flags >> 6) & 1;
3974 #ifdef TARGET_ABI32
3975 lm = 0;
3976 #else
3977 lm = (ldt_info.flags >> 7) & 1;
3978 #endif
3979 if (contents == 3) {
3980 if (oldmode)
3981 return -TARGET_EINVAL;
3982 if (seg_not_present == 0)
3983 return -TARGET_EINVAL;
3984 }
3985 /* allocate the LDT */
3986 if (!ldt_table) {
3987 env->ldt.base = target_mmap(0,
3988 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3989 PROT_READ|PROT_WRITE,
3990 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3991 if (env->ldt.base == -1)
3992 return -TARGET_ENOMEM;
3993 memset(g2h(env->ldt.base), 0,
3994 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3995 env->ldt.limit = 0xffff;
3996 ldt_table = g2h(env->ldt.base);
3997 }
3998
3999 /* NOTE: same code as Linux kernel */
4000 /* Allow LDTs to be cleared by the user. */
4001 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4002 if (oldmode ||
4003 (contents == 0 &&
4004 read_exec_only == 1 &&
4005 seg_32bit == 0 &&
4006 limit_in_pages == 0 &&
4007 seg_not_present == 1 &&
4008 useable == 0 )) {
4009 entry_1 = 0;
4010 entry_2 = 0;
4011 goto install;
4012 }
4013 }
4014
4015 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4016 (ldt_info.limit & 0x0ffff);
4017 entry_2 = (ldt_info.base_addr & 0xff000000) |
4018 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4019 (ldt_info.limit & 0xf0000) |
4020 ((read_exec_only ^ 1) << 9) |
4021 (contents << 10) |
4022 ((seg_not_present ^ 1) << 15) |
4023 (seg_32bit << 22) |
4024 (limit_in_pages << 23) |
4025 (lm << 21) |
4026 0x7000;
4027 if (!oldmode)
4028 entry_2 |= (useable << 20);
4029
4030 /* Install the new entry ... */
4031 install:
4032 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4033 lp[0] = tswap32(entry_1);
4034 lp[1] = tswap32(entry_2);
4035 return 0;
4036 }
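/* The two descriptor words built above use the same layout as the Linux
   kernel's modify_ldt handling (see the NOTE above): entry_1 holds
   base[15:0] in its upper half and limit[15:0] in its lower half, while
   entry_2 packs base[31:24], base[23:16] (in its low byte), limit[19:16]
   and the access bits derived from ldt_info.flags.  The constant 0x7000
   sets the code/data-segment type bit and DPL 3; the present bit comes
   from seg_not_present inverted. */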
4037
4038 /* specific and weird i386 syscalls */
4039 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4040 unsigned long bytecount)
4041 {
4042 abi_long ret;
4043
4044 switch (func) {
4045 case 0:
4046 ret = read_ldt(ptr, bytecount);
4047 break;
4048 case 1:
4049 ret = write_ldt(env, ptr, bytecount, 1);
4050 break;
4051 case 0x11:
4052 ret = write_ldt(env, ptr, bytecount, 0);
4053 break;
4054 default:
4055 ret = -TARGET_ENOSYS;
4056 break;
4057 }
4058 return ret;
4059 }
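/* The func values follow modify_ldt(2): 0 reads the LDT into the user
   buffer, 1 writes an entry in the legacy format (which ignores the
   "useable" bit), 0x11 writes an entry in the newer format, and anything
   else is reported as unimplemented. */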
4060
4061 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4062 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4063 {
4064 uint64_t *gdt_table = g2h(env->gdt.base);
4065 struct target_modify_ldt_ldt_s ldt_info;
4066 struct target_modify_ldt_ldt_s *target_ldt_info;
4067 int seg_32bit, contents, read_exec_only, limit_in_pages;
4068 int seg_not_present, useable, lm;
4069 uint32_t *lp, entry_1, entry_2;
4070 int i;
4071
4072 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4073 if (!target_ldt_info)
4074 return -TARGET_EFAULT;
4075 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4076 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4077 ldt_info.limit = tswap32(target_ldt_info->limit);
4078 ldt_info.flags = tswap32(target_ldt_info->flags);
4079 if (ldt_info.entry_number == -1) {
4080 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4081 if (gdt_table[i] == 0) {
4082 ldt_info.entry_number = i;
4083 target_ldt_info->entry_number = tswap32(i);
4084 break;
4085 }
4086 }
4087 }
4088 unlock_user_struct(target_ldt_info, ptr, 1);
4089
4090 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4091 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4092 return -TARGET_EINVAL;
4093 seg_32bit = ldt_info.flags & 1;
4094 contents = (ldt_info.flags >> 1) & 3;
4095 read_exec_only = (ldt_info.flags >> 3) & 1;
4096 limit_in_pages = (ldt_info.flags >> 4) & 1;
4097 seg_not_present = (ldt_info.flags >> 5) & 1;
4098 useable = (ldt_info.flags >> 6) & 1;
4099 #ifdef TARGET_ABI32
4100 lm = 0;
4101 #else
4102 lm = (ldt_info.flags >> 7) & 1;
4103 #endif
4104
4105 if (contents == 3) {
4106 if (seg_not_present == 0)
4107 return -TARGET_EINVAL;
4108 }
4109
4110 /* NOTE: same code as Linux kernel */
4111 /* Allow LDTs to be cleared by the user. */
4112 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4113 if ((contents == 0 &&
4114 read_exec_only == 1 &&
4115 seg_32bit == 0 &&
4116 limit_in_pages == 0 &&
4117 seg_not_present == 1 &&
4118 useable == 0 )) {
4119 entry_1 = 0;
4120 entry_2 = 0;
4121 goto install;
4122 }
4123 }
4124
4125 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4126 (ldt_info.limit & 0x0ffff);
4127 entry_2 = (ldt_info.base_addr & 0xff000000) |
4128 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4129 (ldt_info.limit & 0xf0000) |
4130 ((read_exec_only ^ 1) << 9) |
4131 (contents << 10) |
4132 ((seg_not_present ^ 1) << 15) |
4133 (seg_32bit << 22) |
4134 (limit_in_pages << 23) |
4135 (useable << 20) |
4136 (lm << 21) |
4137 0x7000;
4138
4139 /* Install the new entry ... */
4140 install:
4141 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4142 lp[0] = tswap32(entry_1);
4143 lp[1] = tswap32(entry_2);
4144 return 0;
4145 }
4146
4147 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4148 {
4149 struct target_modify_ldt_ldt_s *target_ldt_info;
4150 uint64_t *gdt_table = g2h(env->gdt.base);
4151 uint32_t base_addr, limit, flags;
4152 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4153 int seg_not_present, useable, lm;
4154 uint32_t *lp, entry_1, entry_2;
4155
4156 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4157 if (!target_ldt_info)
4158 return -TARGET_EFAULT;
4159 idx = tswap32(target_ldt_info->entry_number);
4160 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4161 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4162 unlock_user_struct(target_ldt_info, ptr, 1);
4163 return -TARGET_EINVAL;
4164 }
4165 lp = (uint32_t *)(gdt_table + idx);
4166 entry_1 = tswap32(lp[0]);
4167 entry_2 = tswap32(lp[1]);
4168
4169 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4170 contents = (entry_2 >> 10) & 3;
4171 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4172 seg_32bit = (entry_2 >> 22) & 1;
4173 limit_in_pages = (entry_2 >> 23) & 1;
4174 useable = (entry_2 >> 20) & 1;
4175 #ifdef TARGET_ABI32
4176 lm = 0;
4177 #else
4178 lm = (entry_2 >> 21) & 1;
4179 #endif
4180 flags = (seg_32bit << 0) | (contents << 1) |
4181 (read_exec_only << 3) | (limit_in_pages << 4) |
4182 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4183 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4184 base_addr = (entry_1 >> 16) |
4185 (entry_2 & 0xff000000) |
4186 ((entry_2 & 0xff) << 16);
4187 target_ldt_info->base_addr = tswapal(base_addr);
4188 target_ldt_info->limit = tswap32(limit);
4189 target_ldt_info->flags = tswap32(flags);
4190 unlock_user_struct(target_ldt_info, ptr, 1);
4191 return 0;
4192 }
4193 #endif /* TARGET_I386 && TARGET_ABI32 */
4194
4195 #ifndef TARGET_ABI32
4196 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4197 {
4198 abi_long ret = 0;
4199 abi_ulong val;
4200 int idx;
4201
4202 switch(code) {
4203 case TARGET_ARCH_SET_GS:
4204 case TARGET_ARCH_SET_FS:
4205 if (code == TARGET_ARCH_SET_GS)
4206 idx = R_GS;
4207 else
4208 idx = R_FS;
4209 cpu_x86_load_seg(env, idx, 0);
4210 env->segs[idx].base = addr;
4211 break;
4212 case TARGET_ARCH_GET_GS:
4213 case TARGET_ARCH_GET_FS:
4214 if (code == TARGET_ARCH_GET_GS)
4215 idx = R_GS;
4216 else
4217 idx = R_FS;
4218 val = env->segs[idx].base;
4219 if (put_user(val, addr, abi_ulong))
4220 ret = -TARGET_EFAULT;
4221 break;
4222 default:
4223 ret = -TARGET_EINVAL;
4224 break;
4225 }
4226 return ret;
4227 }
4228 #endif
4229
4230 #endif /* defined(TARGET_I386) */
4231
4232 #define NEW_STACK_SIZE 0x40000
4233
4234 #if defined(CONFIG_USE_NPTL)
4235
4236 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4237 typedef struct {
4238 CPUArchState *env;
4239 pthread_mutex_t mutex;
4240 pthread_cond_t cond;
4241 pthread_t thread;
4242 uint32_t tid;
4243 abi_ulong child_tidptr;
4244 abi_ulong parent_tidptr;
4245 sigset_t sigmask;
4246 } new_thread_info;
4247
4248 static void *clone_func(void *arg)
4249 {
4250 new_thread_info *info = arg;
4251 CPUArchState *env;
4252 TaskState *ts;
4253
4254 env = info->env;
4255 thread_env = env;
4256 ts = (TaskState *)thread_env->opaque;
4257 info->tid = gettid();
4258 env->host_tid = info->tid;
4259 task_settid(ts);
4260 if (info->child_tidptr)
4261 put_user_u32(info->tid, info->child_tidptr);
4262 if (info->parent_tidptr)
4263 put_user_u32(info->tid, info->parent_tidptr);
4264 /* Enable signals. */
4265 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4266 /* Signal to the parent that we're ready. */
4267 pthread_mutex_lock(&info->mutex);
4268 pthread_cond_broadcast(&info->cond);
4269 pthread_mutex_unlock(&info->mutex);
4270 /* Wait until the parent has finished initializing the TLS state. */
4271 pthread_mutex_lock(&clone_lock);
4272 pthread_mutex_unlock(&clone_lock);
4273 cpu_loop(env);
4274 /* never exits */
4275 return NULL;
4276 }
4277 #else
4278
4279 static int clone_func(void *arg)
4280 {
4281 CPUArchState *env = arg;
4282 cpu_loop(env);
4283 /* never exits */
4284 return 0;
4285 }
4286 #endif
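/* Two clone strategies: with CONFIG_USE_NPTL a CLONE_VM child runs as a
   host pthread, and clone_func() above publishes its TID, restores the
   signal mask and wakes the parent before entering cpu_loop(); without
   NPTL the fallback clone_func() is started on a raw host clone() with a
   freshly allocated stack (see do_fork() below). */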
4287
4288 /* do_fork() must return host values and target errnos (unlike most other
4289 do_*() functions). */
4290 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4291 abi_ulong parent_tidptr, target_ulong newtls,
4292 abi_ulong child_tidptr)
4293 {
4294 int ret;
4295 TaskState *ts;
4296 CPUArchState *new_env;
4297 #if defined(CONFIG_USE_NPTL)
4298 unsigned int nptl_flags;
4299 sigset_t sigmask;
4300 #else
4301 uint8_t *new_stack;
4302 #endif
4303
4304 /* Emulate vfork() with fork() */
4305 if (flags & CLONE_VFORK)
4306 flags &= ~(CLONE_VFORK | CLONE_VM);
4307
4308 if (flags & CLONE_VM) {
4309 TaskState *parent_ts = (TaskState *)env->opaque;
4310 #if defined(CONFIG_USE_NPTL)
4311 new_thread_info info;
4312 pthread_attr_t attr;
4313 #endif
4314 ts = g_malloc0(sizeof(TaskState));
4315 init_task_state(ts);
4316 /* we create a new CPU instance. */
4317 new_env = cpu_copy(env);
4318 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4319 cpu_reset(ENV_GET_CPU(new_env));
4320 #endif
4321 /* Init regs that differ from the parent. */
4322 cpu_clone_regs(new_env, newsp);
4323 new_env->opaque = ts;
4324 ts->bprm = parent_ts->bprm;
4325 ts->info = parent_ts->info;
4326 #if defined(CONFIG_USE_NPTL)
4327 nptl_flags = flags;
4328 flags &= ~CLONE_NPTL_FLAGS2;
4329
4330 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4331 ts->child_tidptr = child_tidptr;
4332 }
4333
4334 if (nptl_flags & CLONE_SETTLS)
4335 cpu_set_tls (new_env, newtls);
4336
4337 /* Grab a mutex so that thread setup appears atomic. */
4338 pthread_mutex_lock(&clone_lock);
4339
4340 memset(&info, 0, sizeof(info));
4341 pthread_mutex_init(&info.mutex, NULL);
4342 pthread_mutex_lock(&info.mutex);
4343 pthread_cond_init(&info.cond, NULL);
4344 info.env = new_env;
4345 if (nptl_flags & CLONE_CHILD_SETTID)
4346 info.child_tidptr = child_tidptr;
4347 if (nptl_flags & CLONE_PARENT_SETTID)
4348 info.parent_tidptr = parent_tidptr;
4349
4350 ret = pthread_attr_init(&attr);
4351 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4352 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4353 /* It is not safe to deliver signals until the child has finished
4354 initializing, so temporarily block all signals. */
4355 sigfillset(&sigmask);
4356 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4357
4358 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4359 /* TODO: Free new CPU state if thread creation failed. */
4360
4361 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4362 pthread_attr_destroy(&attr);
4363 if (ret == 0) {
4364 /* Wait for the child to initialize. */
4365 pthread_cond_wait(&info.cond, &info.mutex);
4366 ret = info.tid;
4367 if (flags & CLONE_PARENT_SETTID)
4368 put_user_u32(ret, parent_tidptr);
4369 } else {
4370 ret = -1;
4371 }
4372 pthread_mutex_unlock(&info.mutex);
4373 pthread_cond_destroy(&info.cond);
4374 pthread_mutex_destroy(&info.mutex);
4375 pthread_mutex_unlock(&clone_lock);
4376 #else
4377 if (flags & CLONE_NPTL_FLAGS2)
4378 return -EINVAL;
4379 /* This is probably going to die very quickly, but do it anyway. */
4380 new_stack = g_malloc0 (NEW_STACK_SIZE);
4381 #ifdef __ia64__
4382 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4383 #else
4384 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4385 #endif
4386 #endif
4387 } else {
4388 /* if no CLONE_VM, we consider it is a fork */
4389 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4390 return -EINVAL;
4391 fork_start();
4392 ret = fork();
4393 if (ret == 0) {
4394 /* Child Process. */
4395 cpu_clone_regs(env, newsp);
4396 fork_end(1);
4397 #if defined(CONFIG_USE_NPTL)
4398 /* There is a race condition here. The parent process could
4399 theoretically read the TID in the child process before the child
4400 tid is set. This would require using either ptrace
4401 (not implemented) or having *_tidptr point at a shared memory
4402 mapping. We can't repeat the spinlock hack used above because
4403 the child process gets its own copy of the lock. */
4404 if (flags & CLONE_CHILD_SETTID)
4405 put_user_u32(gettid(), child_tidptr);
4406 if (flags & CLONE_PARENT_SETTID)
4407 put_user_u32(gettid(), parent_tidptr);
4408 ts = (TaskState *)env->opaque;
4409 if (flags & CLONE_SETTLS)
4410 cpu_set_tls (env, newtls);
4411 if (flags & CLONE_CHILD_CLEARTID)
4412 ts->child_tidptr = child_tidptr;
4413 #endif
4414 } else {
4415 fork_end(0);
4416 }
4417 }
4418 return ret;
4419 }
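/* The thread-creation handshake above is two-staged: the parent holds both
   clone_lock and info.mutex while starting the thread, the child signals
   info.cond once its TID has been published, and the child then blocks on
   clone_lock so that it only enters cpu_loop() after the parent has
   finished the remaining setup. */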
4420
4421 /* warning: doesn't handle Linux-specific flags... */
4422 static int target_to_host_fcntl_cmd(int cmd)
4423 {
4424 switch(cmd) {
4425 case TARGET_F_DUPFD:
4426 case TARGET_F_GETFD:
4427 case TARGET_F_SETFD:
4428 case TARGET_F_GETFL:
4429 case TARGET_F_SETFL:
4430 return cmd;
4431 case TARGET_F_GETLK:
4432 return F_GETLK;
4433 case TARGET_F_SETLK:
4434 return F_SETLK;
4435 case TARGET_F_SETLKW:
4436 return F_SETLKW;
4437 case TARGET_F_GETOWN:
4438 return F_GETOWN;
4439 case TARGET_F_SETOWN:
4440 return F_SETOWN;
4441 case TARGET_F_GETSIG:
4442 return F_GETSIG;
4443 case TARGET_F_SETSIG:
4444 return F_SETSIG;
4445 #if TARGET_ABI_BITS == 32
4446 case TARGET_F_GETLK64:
4447 return F_GETLK64;
4448 case TARGET_F_SETLK64:
4449 return F_SETLK64;
4450 case TARGET_F_SETLKW64:
4451 return F_SETLKW64;
4452 #endif
4453 case TARGET_F_SETLEASE:
4454 return F_SETLEASE;
4455 case TARGET_F_GETLEASE:
4456 return F_GETLEASE;
4457 #ifdef F_DUPFD_CLOEXEC
4458 case TARGET_F_DUPFD_CLOEXEC:
4459 return F_DUPFD_CLOEXEC;
4460 #endif
4461 case TARGET_F_NOTIFY:
4462 return F_NOTIFY;
4463 default:
4464 return -TARGET_EINVAL;
4465 }
4466 return -TARGET_EINVAL;
4467 }
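/* Commands whose numeric values are assumed to match the host (F_DUPFD,
   F_GETFD, F_SETFD, F_GETFL, F_SETFL) are passed through unchanged; the
   rest are translated explicitly, and any command not listed here comes
   back as -TARGET_EINVAL. */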
4468
4469 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4470 {
4471 struct flock fl;
4472 struct target_flock *target_fl;
4473 struct flock64 fl64;
4474 struct target_flock64 *target_fl64;
4475 abi_long ret;
4476 int host_cmd = target_to_host_fcntl_cmd(cmd);
4477
4478 if (host_cmd == -TARGET_EINVAL)
4479 return host_cmd;
4480
4481 switch(cmd) {
4482 case TARGET_F_GETLK:
4483 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4484 return -TARGET_EFAULT;
4485 fl.l_type = tswap16(target_fl->l_type);
4486 fl.l_whence = tswap16(target_fl->l_whence);
4487 fl.l_start = tswapal(target_fl->l_start);
4488 fl.l_len = tswapal(target_fl->l_len);
4489 fl.l_pid = tswap32(target_fl->l_pid);
4490 unlock_user_struct(target_fl, arg, 0);
4491 ret = get_errno(fcntl(fd, host_cmd, &fl));
4492 if (ret == 0) {
4493 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4494 return -TARGET_EFAULT;
4495 target_fl->l_type = tswap16(fl.l_type);
4496 target_fl->l_whence = tswap16(fl.l_whence);
4497 target_fl->l_start = tswapal(fl.l_start);
4498 target_fl->l_len = tswapal(fl.l_len);
4499 target_fl->l_pid = tswap32(fl.l_pid);
4500 unlock_user_struct(target_fl, arg, 1);
4501 }
4502 break;
4503
4504 case TARGET_F_SETLK:
4505 case TARGET_F_SETLKW:
4506 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4507 return -TARGET_EFAULT;
4508 fl.l_type = tswap16(target_fl->l_type);
4509 fl.l_whence = tswap16(target_fl->l_whence);
4510 fl.l_start = tswapal(target_fl->l_start);
4511 fl.l_len = tswapal(target_fl->l_len);
4512 fl.l_pid = tswap32(target_fl->l_pid);
4513 unlock_user_struct(target_fl, arg, 0);
4514 ret = get_errno(fcntl(fd, host_cmd, &fl));
4515 break;
4516
4517 case TARGET_F_GETLK64:
4518 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4519 return -TARGET_EFAULT;
4520 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4521 fl64.l_whence = tswap16(target_fl64->l_whence);
4522 fl64.l_start = tswap64(target_fl64->l_start);
4523 fl64.l_len = tswap64(target_fl64->l_len);
4524 fl64.l_pid = tswap32(target_fl64->l_pid);
4525 unlock_user_struct(target_fl64, arg, 0);
4526 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4527 if (ret == 0) {
4528 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4529 return -TARGET_EFAULT;
4530 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4531 target_fl64->l_whence = tswap16(fl64.l_whence);
4532 target_fl64->l_start = tswap64(fl64.l_start);
4533 target_fl64->l_len = tswap64(fl64.l_len);
4534 target_fl64->l_pid = tswap32(fl64.l_pid);
4535 unlock_user_struct(target_fl64, arg, 1);
4536 }
4537 break;
4538 case TARGET_F_SETLK64:
4539 case TARGET_F_SETLKW64:
4540 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4541 return -TARGET_EFAULT;
4542 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4543 fl64.l_whence = tswap16(target_fl64->l_whence);
4544 fl64.l_start = tswap64(target_fl64->l_start);
4545 fl64.l_len = tswap64(target_fl64->l_len);
4546 fl64.l_pid = tswap32(target_fl64->l_pid);
4547 unlock_user_struct(target_fl64, arg, 0);
4548 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4549 break;
4550
4551 case TARGET_F_GETFL:
4552 ret = get_errno(fcntl(fd, host_cmd, arg));
4553 if (ret >= 0) {
4554 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4555 }
4556 break;
4557
4558 case TARGET_F_SETFL:
4559 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4560 break;
4561
4562 case TARGET_F_SETOWN:
4563 case TARGET_F_GETOWN:
4564 case TARGET_F_SETSIG:
4565 case TARGET_F_GETSIG:
4566 case TARGET_F_SETLEASE:
4567 case TARGET_F_GETLEASE:
4568 ret = get_errno(fcntl(fd, host_cmd, arg));
4569 break;
4570
4571 default:
4572 ret = get_errno(fcntl(fd, cmd, arg));
4573 break;
4574 }
4575 return ret;
4576 }
4577
4578 #ifdef USE_UID16
4579
4580 static inline int high2lowuid(int uid)
4581 {
4582 if (uid > 65535)
4583 return 65534;
4584 else
4585 return uid;
4586 }
4587
4588 static inline int high2lowgid(int gid)
4589 {
4590 if (gid > 65535)
4591 return 65534;
4592 else
4593 return gid;
4594 }
4595
4596 static inline int low2highuid(int uid)
4597 {
4598 if ((int16_t)uid == -1)
4599 return -1;
4600 else
4601 return uid;
4602 }
4603
4604 static inline int low2highgid(int gid)
4605 {
4606 if ((int16_t)gid == -1)
4607 return -1;
4608 else
4609 return gid;
4610 }
4611 static inline int tswapid(int id)
4612 {
4613 return tswap16(id);
4614 }
4615 #else /* !USE_UID16 */
4616 static inline int high2lowuid(int uid)
4617 {
4618 return uid;
4619 }
4620 static inline int high2lowgid(int gid)
4621 {
4622 return gid;
4623 }
4624 static inline int low2highuid(int uid)
4625 {
4626 return uid;
4627 }
4628 static inline int low2highgid(int gid)
4629 {
4630 return gid;
4631 }
4632 static inline int tswapid(int id)
4633 {
4634 return tswap32(id);
4635 }
4636 #endif /* USE_UID16 */
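/* On USE_UID16 targets the 16-bit uid/gid syscall variants clamp anything
   above 65535 down to 65534 (the usual overflowuid value) and treat 0xffff
   as -1 so "don't change" arguments keep working; on other targets the
   helpers are identity functions and IDs are swapped as 32-bit values. */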
4637
4638 void syscall_init(void)
4639 {
4640 IOCTLEntry *ie;
4641 const argtype *arg_type;
4642 int size;
4643 int i;
4644
4645 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4646 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4647 #include "syscall_types.h"
4648 #undef STRUCT
4649 #undef STRUCT_SPECIAL
4650
4651 /* Build target_to_host_errno_table[] table from
4652 * host_to_target_errno_table[]. */
4653 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4654 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4655 }
4656
4657 /* We patch the ioctl size if necessary. We rely on the fact that
4658 no ioctl has all the bits set to '1' in the size field. */
4659 ie = ioctl_entries;
4660 while (ie->target_cmd != 0) {
4661 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4662 TARGET_IOC_SIZEMASK) {
4663 arg_type = ie->arg_type;
4664 if (arg_type[0] != TYPE_PTR) {
4665 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4666 ie->target_cmd);
4667 exit(1);
4668 }
4669 arg_type++;
4670 size = thunk_type_size(arg_type, 0);
4671 ie->target_cmd = (ie->target_cmd &
4672 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4673 (size << TARGET_IOC_SIZESHIFT);
4674 }
4675
4676 /* automatic consistency check if host and target arch are the same */
4677 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4678 (defined(__x86_64__) && defined(TARGET_X86_64))
4679 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4680 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4681 ie->name, ie->target_cmd, ie->host_cmd);
4682 }
4683 #endif
4684 ie++;
4685 }
4686 }
4687
4688 #if TARGET_ABI_BITS == 32
4689 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4690 {
4691 #ifdef TARGET_WORDS_BIGENDIAN
4692 return ((uint64_t)word0 << 32) | word1;
4693 #else
4694 return ((uint64_t)word1 << 32) | word0;
4695 #endif
4696 }
4697 #else /* TARGET_ABI_BITS == 32 */
4698 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4699 {
4700 return word0;
4701 }
4702 #endif /* TARGET_ABI_BITS != 32 */
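/* On 32-bit ABIs a 64-bit file offset arrives split across two argument
   registers; target_offset64() reassembles it in the order dictated by the
   target's endianness (on a little-endian target word0=0 and word1=1 give
   0x100000000), and regpairs_aligned() (defined elsewhere in this file)
   shifts the arguments by one slot on ABIs that require 64-bit register
   pairs to start on an even-numbered register. */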
4703
4704 #ifdef TARGET_NR_truncate64
4705 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4706 abi_long arg2,
4707 abi_long arg3,
4708 abi_long arg4)
4709 {
4710 if (regpairs_aligned(cpu_env)) {
4711 arg2 = arg3;
4712 arg3 = arg4;
4713 }
4714 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4715 }
4716 #endif
4717
4718 #ifdef TARGET_NR_ftruncate64
4719 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4720 abi_long arg2,
4721 abi_long arg3,
4722 abi_long arg4)
4723 {
4724 if (regpairs_aligned(cpu_env)) {
4725 arg2 = arg3;
4726 arg3 = arg4;
4727 }
4728 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4729 }
4730 #endif
4731
4732 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4733 abi_ulong target_addr)
4734 {
4735 struct target_timespec *target_ts;
4736
4737 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4738 return -TARGET_EFAULT;
4739 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4740 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4741 unlock_user_struct(target_ts, target_addr, 0);
4742 return 0;
4743 }
4744
4745 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4746 struct timespec *host_ts)
4747 {
4748 struct target_timespec *target_ts;
4749
4750 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4751 return -TARGET_EFAULT;
4752 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4753 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4754 unlock_user_struct(target_ts, target_addr, 1);
4755 return 0;
4756 }
4757
4758 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4759 static inline abi_long host_to_target_stat64(void *cpu_env,
4760 abi_ulong target_addr,
4761 struct stat *host_st)
4762 {
4763 #ifdef TARGET_ARM
4764 if (((CPUARMState *)cpu_env)->eabi) {
4765 struct target_eabi_stat64 *target_st;
4766
4767 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4768 return -TARGET_EFAULT;
4769 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4770 __put_user(host_st->st_dev, &target_st->st_dev);
4771 __put_user(host_st->st_ino, &target_st->st_ino);
4772 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4773 __put_user(host_st->st_ino, &target_st->__st_ino);
4774 #endif
4775 __put_user(host_st->st_mode, &target_st->st_mode);
4776 __put_user(host_st->st_nlink, &target_st->st_nlink);
4777 __put_user(host_st->st_uid, &target_st->st_uid);
4778 __put_user(host_st->st_gid, &target_st->st_gid);
4779 __put_user(host_st->st_rdev, &target_st->st_rdev);
4780 __put_user(host_st->st_size, &target_st->st_size);
4781 __put_user(host_st->st_blksize, &target_st->st_blksize);
4782 __put_user(host_st->st_blocks, &target_st->st_blocks);
4783 __put_user(host_st->st_atime, &target_st->target_st_atime);
4784 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4785 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4786 unlock_user_struct(target_st, target_addr, 1);
4787 } else
4788 #endif
4789 {
4790 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4791 struct target_stat *target_st;
4792 #else
4793 struct target_stat64 *target_st;
4794 #endif
4795
4796 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4797 return -TARGET_EFAULT;
4798 memset(target_st, 0, sizeof(*target_st));
4799 __put_user(host_st->st_dev, &target_st->st_dev);
4800 __put_user(host_st->st_ino, &target_st->st_ino);
4801 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4802 __put_user(host_st->st_ino, &target_st->__st_ino);
4803 #endif
4804 __put_user(host_st->st_mode, &target_st->st_mode);
4805 __put_user(host_st->st_nlink, &target_st->st_nlink);
4806 __put_user(host_st->st_uid, &target_st->st_uid);
4807 __put_user(host_st->st_gid, &target_st->st_gid);
4808 __put_user(host_st->st_rdev, &target_st->st_rdev);
4809 /* XXX: better use of kernel struct */
4810 __put_user(host_st->st_size, &target_st->st_size);
4811 __put_user(host_st->st_blksize, &target_st->st_blksize);
4812 __put_user(host_st->st_blocks, &target_st->st_blocks);
4813 __put_user(host_st->st_atime, &target_st->target_st_atime);
4814 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4815 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4816 unlock_user_struct(target_st, target_addr, 1);
4817 }
4818
4819 return 0;
4820 }
4821 #endif
4822
4823 #if defined(CONFIG_USE_NPTL)
4824 /* ??? Using host futex calls even when target atomic operations
4825 are not really atomic probably breaks things. However, implementing
4826 futexes locally would make futexes shared between multiple processes
4827 tricky. In that case they're probably useless anyway, because guest
4828 atomic operations won't work either. */
4829 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4830 target_ulong uaddr2, int val3)
4831 {
4832 struct timespec ts, *pts;
4833 int base_op;
4834
4835 /* ??? We assume FUTEX_* constants are the same on both host
4836 and target. */
4837 #ifdef FUTEX_CMD_MASK
4838 base_op = op & FUTEX_CMD_MASK;
4839 #else
4840 base_op = op;
4841 #endif
4842 switch (base_op) {
4843 case FUTEX_WAIT:
4844 if (timeout) {
4845 pts = &ts;
4846 target_to_host_timespec(pts, timeout);
4847 } else {
4848 pts = NULL;
4849 }
4850 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4851 pts, NULL, 0));
4852 case FUTEX_WAKE:
4853 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4854 case FUTEX_FD:
4855 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4856 case FUTEX_REQUEUE:
4857 case FUTEX_CMP_REQUEUE:
4858 case FUTEX_WAKE_OP:
4859 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4860 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4861 But the prototype takes a `struct timespec *'; insert casts
4862 to satisfy the compiler. We do not need to tswap TIMEOUT
4863 since it's not compared to guest memory. */
4864 pts = (struct timespec *)(uintptr_t) timeout;
4865 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4866 g2h(uaddr2),
4867 (base_op == FUTEX_CMP_REQUEUE
4868 ? tswap32(val3)
4869 : val3)));
4870 default:
4871 return -TARGET_ENOSYS;
4872 }
4873 }
4874 #endif
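/* Note that only base_op has the modifier bits (such as FUTEX_PRIVATE_FLAG)
   masked off for the dispatch above; the unmasked op value, modifiers
   included, is what gets passed through to the host futex call. */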
4875
4876 /* Map host to target signal numbers for the wait family of syscalls.
4877 Assume all other status bits are the same. */
4878 static int host_to_target_waitstatus(int status)
4879 {
4880 if (WIFSIGNALED(status)) {
4881 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4882 }
4883 if (WIFSTOPPED(status)) {
4884 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4885 | (status & 0xff);
4886 }
4887 return status;
4888 }
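/* In the status word a terminated child carries its signal in the low
   seven bits (with the core-dump flag just above), while a stopped child
   carries the stop signal in bits 8-15 over a 0x7f low byte; that is why
   only the signal numbers are translated here and the surrounding bits are
   copied through unchanged. */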
4889
4890 int get_osversion(void)
4891 {
4892 static int osversion;
4893 struct new_utsname buf;
4894 const char *s;
4895 int i, n, tmp;
4896 if (osversion)
4897 return osversion;
4898 if (qemu_uname_release && *qemu_uname_release) {
4899 s = qemu_uname_release;
4900 } else {
4901 if (sys_uname(&buf))
4902 return 0;
4903 s = buf.release;
4904 }
4905 tmp = 0;
4906 for (i = 0; i < 3; i++) {
4907 n = 0;
4908 while (*s >= '0' && *s <= '9') {
4909 n *= 10;
4910 n += *s - '0';
4911 s++;
4912 }
4913 tmp = (tmp << 8) + n;
4914 if (*s == '.')
4915 s++;
4916 }
4917 osversion = tmp;
4918 return osversion;
4919 }
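/* The release string "a.b.c" is packed one byte per component, so for
   example "2.6.32" becomes 0x00020620. */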
4920
4921
4922 static int open_self_maps(void *cpu_env, int fd)
4923 {
4924 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4925 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4926 #endif
4927 FILE *fp;
4928 char *line = NULL;
4929 size_t len = 0;
4930 ssize_t read;
4931
4932 fp = fopen("/proc/self/maps", "r");
4933 if (fp == NULL) {
4934 return -EACCES;
4935 }
4936
4937 while ((read = getline(&line, &len, fp)) != -1) {
4938 int fields, dev_maj, dev_min, inode;
4939 uint64_t min, max, offset;
4940 char flag_r, flag_w, flag_x, flag_p;
4941 char path[512] = "";
4942 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4943 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4944 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4945
4946 if ((fields < 10) || (fields > 11)) {
4947 continue;
4948 }
4949 if (!strncmp(path, "[stack]", 7)) {
4950 continue;
4951 }
4952 if (h2g_valid(min) && h2g_valid(max)) {
4953 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
4954 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
4955 h2g(min), h2g(max), flag_r, flag_w,
4956 flag_x, flag_p, offset, dev_maj, dev_min, inode,
4957 path[0] ? " " : "", path);
4958 }
4959 }
4960
4961 free(line);
4962 fclose(fp);
4963
4964 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4965 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4966 (unsigned long long)ts->info->stack_limit,
4967 (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1))
4968 & TARGET_PAGE_MASK,
4969 (unsigned long long)0);
4970 #endif
4971
4972 return 0;
4973 }
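/* The host's /proc/self/maps is filtered down to ranges that are valid
   guest addresses (h2g_valid) and rewritten in terms of guest addresses;
   host [stack] lines are skipped, and on the targets covered by the #if
   above a synthetic [stack] entry is appended at the end. */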
4974
4975 static int open_self_stat(void *cpu_env, int fd)
4976 {
4977 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4978 abi_ulong start_stack = ts->info->start_stack;
4979 int i;
4980
4981 for (i = 0; i < 44; i++) {
4982 char buf[128];
4983 int len;
4984 uint64_t val = 0;
4985
4986 if (i == 0) {
4987 /* pid */
4988 val = getpid();
4989 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4990 } else if (i == 1) {
4991 /* app name */
4992 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
4993 } else if (i == 27) {
4994 /* stack bottom */
4995 val = start_stack;
4996 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4997 } else {
4998 /* for the rest, there is MasterCard */
4999 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5000 }
5001
5002 len = strlen(buf);
5003 if (write(fd, buf, len) != len) {
5004 return -1;
5005 }
5006 }
5007
5008 return 0;
5009 }
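/* This emits 44 space-separated fields in /proc/<pid>/stat order, but only
   the pid (field 1), the command name (field 2) and the stack start
   (field 28) carry real values; every other field is reported as 0. */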
5010
5011 static int open_self_auxv(void *cpu_env, int fd)
5012 {
5013 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5014 abi_ulong auxv = ts->info->saved_auxv;
5015 abi_ulong len = ts->info->auxv_len;
5016 char *ptr;
5017
5018 /*
5019 * The auxiliary vector is stored on the target process's stack.
5020 * Read in the whole auxv vector and copy it to the file.
5021 */
5022 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5023 if (ptr != NULL) {
5024 while (len > 0) {
5025 ssize_t r;
5026 r = write(fd, ptr, len);
5027 if (r <= 0) {
5028 break;
5029 }
5030 len -= r;
5031 ptr += r;
5032 }
5033 lseek(fd, 0, SEEK_SET);
5034 unlock_user(ptr, auxv, len);
5035 }
5036
5037 return 0;
5038 }
5039
5040 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5041 {
5042 struct fake_open {
5043 const char *filename;
5044 int (*fill)(void *cpu_env, int fd);
5045 };
5046 const struct fake_open *fake_open;
5047 static const struct fake_open fakes[] = {
5048 { "/proc/self/maps", open_self_maps },
5049 { "/proc/self/stat", open_self_stat },
5050 { "/proc/self/auxv", open_self_auxv },
5051 { NULL, NULL }
5052 };
5053
5054 for (fake_open = fakes; fake_open->filename; fake_open++) {
5055 if (!strncmp(pathname, fake_open->filename,
5056 strlen(fake_open->filename))) {
5057 break;
5058 }
5059 }
5060
5061 if (fake_open->filename) {
5062 const char *tmpdir;
5063 char filename[PATH_MAX];
5064 int fd, r;
5065
5066 /* create a temporary file to hold the synthesized /proc contents */
5067 tmpdir = getenv("TMPDIR");
5068 if (!tmpdir)
5069 tmpdir = "/tmp";
5070 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5071 fd = mkstemp(filename);
5072 if (fd < 0) {
5073 return fd;
5074 }
5075 unlink(filename);
5076
5077 if ((r = fake_open->fill(cpu_env, fd))) {
5078 close(fd);
5079 return r;
5080 }
5081 lseek(fd, 0, SEEK_SET);
5082
5083 return fd;
5084 }
5085
5086 return get_errno(open(path(pathname), flags, mode));
5087 }
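/* Guest open() requests are funneled through here: pathnames matching the
   fake_open table are emulated by writing synthesized contents into an
   already-unlinked temporary file and returning its descriptor, so the
   guest sees emulator-specific /proc data rather than the host's;
   everything else falls through to the host open(). */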
5088
5089 /* do_syscall() should always have a single exit point at the end so
5090 that actions, such as logging of syscall results, can be performed.
5091 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5092 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5093 abi_long arg2, abi_long arg3, abi_long arg4,
5094 abi_long arg5, abi_long arg6, abi_long arg7,
5095 abi_long arg8)
5096 {
5097 abi_long ret;
5098 struct stat st;
5099 struct statfs stfs;
5100 void *p;
5101
5102 #ifdef DEBUG
5103 gemu_log("syscall %d", num);
5104 #endif
5105 if(do_strace)
5106 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5107
5108 switch(num) {
5109 case TARGET_NR_exit:
5110 #ifdef CONFIG_USE_NPTL
5111 /* In old applications this may be used to implement _exit(2).
5112 However, in threaded applications it is used for thread termination,
5113 and _exit_group is used for application termination.
5114 Do thread termination if we have more than one thread. */
5115 /* FIXME: This probably breaks if a signal arrives. We should probably
5116 be disabling signals. */
5117 if (first_cpu->next_cpu) {
5118 TaskState *ts;
5119 CPUArchState **lastp;
5120 CPUArchState *p;
5121
5122 cpu_list_lock();
5123 lastp = &first_cpu;
5124 p = first_cpu;
5125 while (p && p != (CPUArchState *)cpu_env) {
5126 lastp = &p->next_cpu;
5127 p = p->next_cpu;
5128 }
5129 /* If we didn't find the CPU for this thread then something is
5130 horribly wrong. */
5131 if (!p)
5132 abort();
5133 /* Remove the CPU from the list. */
5134 *lastp = p->next_cpu;
5135 cpu_list_unlock();
5136 ts = ((CPUArchState *)cpu_env)->opaque;
5137 if (ts->child_tidptr) {
5138 put_user_u32(0, ts->child_tidptr);
5139 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5140 NULL, NULL, 0);
5141 }
5142 thread_env = NULL;
5143 object_delete(OBJECT(ENV_GET_CPU(cpu_env)));
5144 g_free(ts);
5145 pthread_exit(NULL);
5146 }
5147 #endif
5148 #ifdef TARGET_GPROF
5149 _mcleanup();
5150 #endif
5151 gdb_exit(cpu_env, arg1);
5152 _exit(arg1);
5153 ret = 0; /* avoid warning */
5154 break;
5155 case TARGET_NR_read:
5156 if (arg3 == 0)
5157 ret = 0;
5158 else {
5159 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5160 goto efault;
5161 ret = get_errno(read(arg1, p, arg3));
5162 unlock_user(p, arg2, ret);
5163 }
5164 break;
5165 case TARGET_NR_write:
5166 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5167 goto efault;
5168 ret = get_errno(write(arg1, p, arg3));
5169 unlock_user(p, arg2, 0);
5170 break;
5171 case TARGET_NR_open:
5172 if (!(p = lock_user_string(arg1)))
5173 goto efault;
5174 ret = get_errno(do_open(cpu_env, p,
5175 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5176 arg3));
5177 unlock_user(p, arg1, 0);
5178 break;
5179 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5180 case TARGET_NR_openat:
5181 if (!(p = lock_user_string(arg2)))
5182 goto efault;
5183 ret = get_errno(sys_openat(arg1,
5184 path(p),
5185 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5186 arg4));
5187 unlock_user(p, arg2, 0);
5188 break;
5189 #endif
5190 case TARGET_NR_close:
5191 ret = get_errno(close(arg1));
5192 break;
5193 case TARGET_NR_brk:
5194 ret = do_brk(arg1);
5195 break;
5196 case TARGET_NR_fork:
5197 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5198 break;
5199 #ifdef TARGET_NR_waitpid
5200 case TARGET_NR_waitpid:
5201 {
5202 int status;
5203 ret = get_errno(waitpid(arg1, &status, arg3));
5204 if (!is_error(ret) && arg2 && ret
5205 && put_user_s32(host_to_target_waitstatus(status), arg2))
5206 goto efault;
5207 }
5208 break;
5209 #endif
5210 #ifdef TARGET_NR_waitid
5211 case TARGET_NR_waitid:
5212 {
5213 siginfo_t info;
5214 info.si_pid = 0;
5215 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5216 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5217 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5218 goto efault;
5219 host_to_target_siginfo(p, &info);
5220 unlock_user(p, arg3, sizeof(target_siginfo_t));
5221 }
5222 }
5223 break;
5224 #endif
5225 #ifdef TARGET_NR_creat /* not on alpha */
5226 case TARGET_NR_creat:
5227 if (!(p = lock_user_string(arg1)))
5228 goto efault;
5229 ret = get_errno(creat(p, arg2));
5230 unlock_user(p, arg1, 0);
5231 break;
5232 #endif
5233 case TARGET_NR_link:
5234 {
5235 void * p2;
5236 p = lock_user_string(arg1);
5237 p2 = lock_user_string(arg2);
5238 if (!p || !p2)
5239 ret = -TARGET_EFAULT;
5240 else
5241 ret = get_errno(link(p, p2));
5242 unlock_user(p2, arg2, 0);
5243 unlock_user(p, arg1, 0);
5244 }
5245 break;
5246 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5247 case TARGET_NR_linkat:
5248 {
5249 void * p2 = NULL;
5250 if (!arg2 || !arg4)
5251 goto efault;
5252 p = lock_user_string(arg2);
5253 p2 = lock_user_string(arg4);
5254 if (!p || !p2)
5255 ret = -TARGET_EFAULT;
5256 else
5257 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
5258 unlock_user(p, arg2, 0);
5259 unlock_user(p2, arg4, 0);
5260 }
5261 break;
5262 #endif
5263 case TARGET_NR_unlink:
5264 if (!(p = lock_user_string(arg1)))
5265 goto efault;
5266 ret = get_errno(unlink(p));
5267 unlock_user(p, arg1, 0);
5268 break;
5269 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5270 case TARGET_NR_unlinkat:
5271 if (!(p = lock_user_string(arg2)))
5272 goto efault;
5273 ret = get_errno(sys_unlinkat(arg1, p, arg3));
5274 unlock_user(p, arg2, 0);
5275 break;
5276 #endif
5277 case TARGET_NR_execve:
5278 {
5279 char **argp, **envp;
5280 int argc, envc;
5281 abi_ulong gp;
5282 abi_ulong guest_argp;
5283 abi_ulong guest_envp;
5284 abi_ulong addr;
5285 char **q;
5286 int total_size = 0;
5287
5288 argc = 0;
5289 guest_argp = arg2;
5290 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5291 if (get_user_ual(addr, gp))
5292 goto efault;
5293 if (!addr)
5294 break;
5295 argc++;
5296 }
5297 envc = 0;
5298 guest_envp = arg3;
5299 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5300 if (get_user_ual(addr, gp))
5301 goto efault;
5302 if (!addr)
5303 break;
5304 envc++;
5305 }
5306
5307 argp = alloca((argc + 1) * sizeof(void *));
5308 envp = alloca((envc + 1) * sizeof(void *));
5309
5310 for (gp = guest_argp, q = argp; gp;
5311 gp += sizeof(abi_ulong), q++) {
5312 if (get_user_ual(addr, gp))
5313 goto execve_efault;
5314 if (!addr)
5315 break;
5316 if (!(*q = lock_user_string(addr)))
5317 goto execve_efault;
5318 total_size += strlen(*q) + 1;
5319 }
5320 *q = NULL;
5321
5322 for (gp = guest_envp, q = envp; gp;
5323 gp += sizeof(abi_ulong), q++) {
5324 if (get_user_ual(addr, gp))
5325 goto execve_efault;
5326 if (!addr)
5327 break;
5328 if (!(*q = lock_user_string(addr)))
5329 goto execve_efault;
5330 total_size += strlen(*q) + 1;
5331 }
5332 *q = NULL;
5333
5334 /* This case will not be caught by the host's execve() if its
5335 page size is bigger than the target's. */
5336 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5337 ret = -TARGET_E2BIG;
5338 goto execve_end;
5339 }
5340 if (!(p = lock_user_string(arg1)))
5341 goto execve_efault;
5342 ret = get_errno(execve(p, argp, envp));
5343 unlock_user(p, arg1, 0);
5344
5345 goto execve_end;
5346
5347 execve_efault:
5348 ret = -TARGET_EFAULT;
5349
5350 execve_end:
5351 for (gp = guest_argp, q = argp; *q;
5352 gp += sizeof(abi_ulong), q++) {
5353 if (get_user_ual(addr, gp)
5354 || !addr)
5355 break;
5356 unlock_user(*q, addr, 0);
5357 }
5358 for (gp = guest_envp, q = envp; *q;
5359 gp += sizeof(abi_ulong), q++) {
5360 if (get_user_ual(addr, gp)
5361 || !addr)
5362 break;
5363 unlock_user(*q, addr, 0);
5364 }
5365 }
5366 break;
5367 case TARGET_NR_chdir:
5368 if (!(p = lock_user_string(arg1)))
5369 goto efault;
5370 ret = get_errno(chdir(p));
5371 unlock_user(p, arg1, 0);
5372 break;
5373 #ifdef TARGET_NR_time
5374 case TARGET_NR_time:
5375 {
5376 time_t host_time;
5377 ret = get_errno(time(&host_time));
5378 if (!is_error(ret)
5379 && arg1
5380 && put_user_sal(host_time, arg1))
5381 goto efault;
5382 }
5383 break;
5384 #endif
5385 case TARGET_NR_mknod:
5386 if (!(p = lock_user_string(arg1)))
5387 goto efault;
5388 ret = get_errno(mknod(p, arg2, arg3));
5389 unlock_user(p, arg1, 0);
5390 break;
5391 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5392 case TARGET_NR_mknodat:
5393 if (!(p = lock_user_string(arg2)))
5394 goto efault;
5395 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5396 unlock_user(p, arg2, 0);
5397 break;
5398 #endif
5399 case TARGET_NR_chmod:
5400 if (!(p = lock_user_string(arg1)))
5401 goto efault;
5402 ret = get_errno(chmod(p, arg2));
5403 unlock_user(p, arg1, 0);
5404 break;
5405 #ifdef TARGET_NR_break
5406 case TARGET_NR_break:
5407 goto unimplemented;
5408 #endif
5409 #ifdef TARGET_NR_oldstat
5410 case TARGET_NR_oldstat:
5411 goto unimplemented;
5412 #endif
5413 case TARGET_NR_lseek:
5414 ret = get_errno(lseek(arg1, arg2, arg3));
5415 break;
5416 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5417 /* Alpha specific */
5418 case TARGET_NR_getxpid:
5419 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5420 ret = get_errno(getpid());
5421 break;
5422 #endif
5423 #ifdef TARGET_NR_getpid
5424 case TARGET_NR_getpid:
5425 ret = get_errno(getpid());
5426 break;
5427 #endif
5428 case TARGET_NR_mount:
5429 {
5430 /* need to look at the data field */
5431 void *p2, *p3;
5432 p = lock_user_string(arg1);
5433 p2 = lock_user_string(arg2);
5434 p3 = lock_user_string(arg3);
5435 if (!p || !p2 || !p3)
5436 ret = -TARGET_EFAULT;
5437 else {
5438 /* FIXME - arg5 should be locked, but it isn't clear how to
5439 * do that since it's not guaranteed to be a NULL-terminated
5440 * string.
5441 */
5442 if ( ! arg5 )
5443 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5444 else
5445 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5446 }
5447 unlock_user(p, arg1, 0);
5448 unlock_user(p2, arg2, 0);
5449 unlock_user(p3, arg3, 0);
5450 break;
5451 }
5452 #ifdef TARGET_NR_umount
5453 case TARGET_NR_umount:
5454 if (!(p = lock_user_string(arg1)))
5455 goto efault;
5456 ret = get_errno(umount(p));
5457 unlock_user(p, arg1, 0);
5458 break;
5459 #endif
5460 #ifdef TARGET_NR_stime /* not on alpha */
5461 case TARGET_NR_stime:
5462 {
5463 time_t host_time;
5464 if (get_user_sal(host_time, arg1))
5465 goto efault;
5466 ret = get_errno(stime(&host_time));
5467 }
5468 break;
5469 #endif
5470 case TARGET_NR_ptrace:
5471 goto unimplemented;
5472 #ifdef TARGET_NR_alarm /* not on alpha */
5473 case TARGET_NR_alarm:
5474 ret = alarm(arg1);
5475 break;
5476 #endif
5477 #ifdef TARGET_NR_oldfstat
5478 case TARGET_NR_oldfstat:
5479 goto unimplemented;
5480 #endif
5481 #ifdef TARGET_NR_pause /* not on alpha */
5482 case TARGET_NR_pause:
5483 ret = get_errno(pause());
5484 break;
5485 #endif
5486 #ifdef TARGET_NR_utime
5487 case TARGET_NR_utime:
5488 {
5489 struct utimbuf tbuf, *host_tbuf;
5490 struct target_utimbuf *target_tbuf;
5491 if (arg2) {
5492 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5493 goto efault;
5494 tbuf.actime = tswapal(target_tbuf->actime);
5495 tbuf.modtime = tswapal(target_tbuf->modtime);
5496 unlock_user_struct(target_tbuf, arg2, 0);
5497 host_tbuf = &tbuf;
5498 } else {
5499 host_tbuf = NULL;
5500 }
5501 if (!(p = lock_user_string(arg1)))
5502 goto efault;
5503 ret = get_errno(utime(p, host_tbuf));
5504 unlock_user(p, arg1, 0);
5505 }
5506 break;
5507 #endif
5508 case TARGET_NR_utimes:
5509 {
5510 struct timeval *tvp, tv[2];
5511 if (arg2) {
5512 if (copy_from_user_timeval(&tv[0], arg2)
5513 || copy_from_user_timeval(&tv[1],
5514 arg2 + sizeof(struct target_timeval)))
5515 goto efault;
5516 tvp = tv;
5517 } else {
5518 tvp = NULL;
5519 }
5520 if (!(p = lock_user_string(arg1)))
5521 goto efault;
5522 ret = get_errno(utimes(p, tvp));
5523 unlock_user(p, arg1, 0);
5524 }
5525 break;
5526 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5527 case TARGET_NR_futimesat:
5528 {
5529 struct timeval *tvp, tv[2];
5530 if (arg3) {
5531 if (copy_from_user_timeval(&tv[0], arg3)
5532 || copy_from_user_timeval(&tv[1],
5533 arg3 + sizeof(struct target_timeval)))
5534 goto efault;
5535 tvp = tv;
5536 } else {
5537 tvp = NULL;
5538 }
5539 if (!(p = lock_user_string(arg2)))
5540 goto efault;
5541 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5542 unlock_user(p, arg2, 0);
5543 }
5544 break;
5545 #endif
5546 #ifdef TARGET_NR_stty
5547 case TARGET_NR_stty:
5548 goto unimplemented;
5549 #endif
5550 #ifdef TARGET_NR_gtty
5551 case TARGET_NR_gtty:
5552 goto unimplemented;
5553 #endif
5554 case TARGET_NR_access:
5555 if (!(p = lock_user_string(arg1)))
5556 goto efault;
5557 ret = get_errno(access(path(p), arg2));
5558 unlock_user(p, arg1, 0);
5559 break;
5560 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5561 case TARGET_NR_faccessat:
5562 if (!(p = lock_user_string(arg2)))
5563 goto efault;
5564 ret = get_errno(sys_faccessat(arg1, p, arg3));
5565 unlock_user(p, arg2, 0);
5566 break;
5567 #endif
5568 #ifdef TARGET_NR_nice /* not on alpha */
5569 case TARGET_NR_nice:
5570 ret = get_errno(nice(arg1));
5571 break;
5572 #endif
5573 #ifdef TARGET_NR_ftime
5574 case TARGET_NR_ftime:
5575 goto unimplemented;
5576 #endif
5577 case TARGET_NR_sync:
5578 sync();
5579 ret = 0;
5580 break;
5581 case TARGET_NR_kill:
5582 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5583 break;
5584 case TARGET_NR_rename:
5585 {
5586 void *p2;
5587 p = lock_user_string(arg1);
5588 p2 = lock_user_string(arg2);
5589 if (!p || !p2)
5590 ret = -TARGET_EFAULT;
5591 else
5592 ret = get_errno(rename(p, p2));
5593 unlock_user(p2, arg2, 0);
5594 unlock_user(p, arg1, 0);
5595 }
5596 break;
5597 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5598 case TARGET_NR_renameat:
5599 {
5600 void *p2;
5601 p = lock_user_string(arg2);
5602 p2 = lock_user_string(arg4);
5603 if (!p || !p2)
5604 ret = -TARGET_EFAULT;
5605 else
5606 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5607 unlock_user(p2, arg4, 0);
5608 unlock_user(p, arg2, 0);
5609 }
5610 break;
5611 #endif
5612 case TARGET_NR_mkdir:
5613 if (!(p = lock_user_string(arg1)))
5614 goto efault;
5615 ret = get_errno(mkdir(p, arg2));
5616 unlock_user(p, arg1, 0);
5617 break;
5618 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5619 case TARGET_NR_mkdirat:
5620 if (!(p = lock_user_string(arg2)))
5621 goto efault;
5622 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5623 unlock_user(p, arg2, 0);
5624 break;
5625 #endif
5626 case TARGET_NR_rmdir:
5627 if (!(p = lock_user_string(arg1)))
5628 goto efault;
5629 ret = get_errno(rmdir(p));
5630 unlock_user(p, arg1, 0);
5631 break;
5632 case TARGET_NR_dup:
5633 ret = get_errno(dup(arg1));
5634 break;
5635 case TARGET_NR_pipe:
5636 ret = do_pipe(cpu_env, arg1, 0, 0);
5637 break;
5638 #ifdef TARGET_NR_pipe2
5639 case TARGET_NR_pipe2:
5640 ret = do_pipe(cpu_env, arg1,
5641 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5642 break;
5643 #endif
5644 case TARGET_NR_times:
5645 {
5646 struct target_tms *tmsp;
5647 struct tms tms;
5648 ret = get_errno(times(&tms));
5649 if (arg1) {
5650 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5651 if (!tmsp)
5652 goto efault;
5653 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5654 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5655 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5656 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5657 }
5658 if (!is_error(ret))
5659 ret = host_to_target_clock_t(ret);
5660 }
5661 break;
5662 #ifdef TARGET_NR_prof
5663 case TARGET_NR_prof:
5664 goto unimplemented;
5665 #endif
5666 #ifdef TARGET_NR_signal
5667 case TARGET_NR_signal:
5668 goto unimplemented;
5669 #endif
5670 case TARGET_NR_acct:
5671 if (arg1 == 0) {
5672 ret = get_errno(acct(NULL));
5673 } else {
5674 if (!(p = lock_user_string(arg1)))
5675 goto efault;
5676 ret = get_errno(acct(path(p)));
5677 unlock_user(p, arg1, 0);
5678 }
5679 break;
5680 #ifdef TARGET_NR_umount2 /* not on alpha */
5681 case TARGET_NR_umount2:
5682 if (!(p = lock_user_string(arg1)))
5683 goto efault;
5684 ret = get_errno(umount2(p, arg2));
5685 unlock_user(p, arg1, 0);
5686 break;
5687 #endif
5688 #ifdef TARGET_NR_lock
5689 case TARGET_NR_lock:
5690 goto unimplemented;
5691 #endif
5692 case TARGET_NR_ioctl:
5693 ret = do_ioctl(arg1, arg2, arg3);
5694 break;
5695 case TARGET_NR_fcntl:
5696 ret = do_fcntl(arg1, arg2, arg3);
5697 break;
5698 #ifdef TARGET_NR_mpx
5699 case TARGET_NR_mpx:
5700 goto unimplemented;
5701 #endif
5702 case TARGET_NR_setpgid:
5703 ret = get_errno(setpgid(arg1, arg2));
5704 break;
5705 #ifdef TARGET_NR_ulimit
5706 case TARGET_NR_ulimit:
5707 goto unimplemented;
5708 #endif
5709 #ifdef TARGET_NR_oldolduname
5710 case TARGET_NR_oldolduname:
5711 goto unimplemented;
5712 #endif
5713 case TARGET_NR_umask:
5714 ret = get_errno(umask(arg1));
5715 break;
5716 case TARGET_NR_chroot:
5717 if (!(p = lock_user_string(arg1)))
5718 goto efault;
5719 ret = get_errno(chroot(p));
5720 unlock_user(p, arg1, 0);
5721 break;
5722 case TARGET_NR_ustat:
5723 goto unimplemented;
5724 case TARGET_NR_dup2:
5725 ret = get_errno(dup2(arg1, arg2));
5726 break;
5727 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5728 case TARGET_NR_dup3:
5729 ret = get_errno(dup3(arg1, arg2, arg3));
5730 break;
5731 #endif
5732 #ifdef TARGET_NR_getppid /* not on alpha */
5733 case TARGET_NR_getppid:
5734 ret = get_errno(getppid());
5735 break;
5736 #endif
5737 case TARGET_NR_getpgrp:
5738 ret = get_errno(getpgrp());
5739 break;
5740 case TARGET_NR_setsid:
5741 ret = get_errno(setsid());
5742 break;
5743 #ifdef TARGET_NR_sigaction
5744 case TARGET_NR_sigaction:
5745 {
5746 #if defined(TARGET_ALPHA)
5747 struct target_sigaction act, oact, *pact = 0;
5748 struct target_old_sigaction *old_act;
5749 if (arg2) {
5750 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5751 goto efault;
5752 act._sa_handler = old_act->_sa_handler;
5753 target_siginitset(&act.sa_mask, old_act->sa_mask);
5754 act.sa_flags = old_act->sa_flags;
5755 act.sa_restorer = 0;
5756 unlock_user_struct(old_act, arg2, 0);
5757 pact = &act;
5758 }
5759 ret = get_errno(do_sigaction(arg1, pact, &oact));
5760 if (!is_error(ret) && arg3) {
5761 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5762 goto efault;
5763 old_act->_sa_handler = oact._sa_handler;
5764 old_act->sa_mask = oact.sa_mask.sig[0];
5765 old_act->sa_flags = oact.sa_flags;
5766 unlock_user_struct(old_act, arg3, 1);
5767 }
5768 #elif defined(TARGET_MIPS)
5769 struct target_sigaction act, oact, *pact, *old_act;
5770
5771 if (arg2) {
5772 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5773 goto efault;
5774 act._sa_handler = old_act->_sa_handler;
5775 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5776 act.sa_flags = old_act->sa_flags;
5777 unlock_user_struct(old_act, arg2, 0);
5778 pact = &act;
5779 } else {
5780 pact = NULL;
5781 }
5782
5783 ret = get_errno(do_sigaction(arg1, pact, &oact));
5784
5785 if (!is_error(ret) && arg3) {
5786 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5787 goto efault;
5788 old_act->_sa_handler = oact._sa_handler;
5789 old_act->sa_flags = oact.sa_flags;
5790 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5791 old_act->sa_mask.sig[1] = 0;
5792 old_act->sa_mask.sig[2] = 0;
5793 old_act->sa_mask.sig[3] = 0;
5794 unlock_user_struct(old_act, arg3, 1);
5795 }
5796 #else
5797 struct target_old_sigaction *old_act;
5798 struct target_sigaction act, oact, *pact;
5799 if (arg2) {
5800 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5801 goto efault;
5802 act._sa_handler = old_act->_sa_handler;
5803 target_siginitset(&act.sa_mask, old_act->sa_mask);
5804 act.sa_flags = old_act->sa_flags;
5805 act.sa_restorer = old_act->sa_restorer;
5806 unlock_user_struct(old_act, arg2, 0);
5807 pact = &act;
5808 } else {
5809 pact = NULL;
5810 }
5811 ret = get_errno(do_sigaction(arg1, pact, &oact));
5812 if (!is_error(ret) && arg3) {
5813 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5814 goto efault;
5815 old_act->_sa_handler = oact._sa_handler;
5816 old_act->sa_mask = oact.sa_mask.sig[0];
5817 old_act->sa_flags = oact.sa_flags;
5818 old_act->sa_restorer = oact.sa_restorer;
5819 unlock_user_struct(old_act, arg3, 1);
5820 }
5821 #endif
5822 }
5823 break;
5824 #endif
5825 case TARGET_NR_rt_sigaction:
5826 {
5827 #if defined(TARGET_ALPHA)
5828 struct target_sigaction act, oact, *pact = 0;
5829 struct target_rt_sigaction *rt_act;
5830 /* ??? arg4 == sizeof(sigset_t). */
5831 if (arg2) {
5832 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5833 goto efault;
5834 act._sa_handler = rt_act->_sa_handler;
5835 act.sa_mask = rt_act->sa_mask;
5836 act.sa_flags = rt_act->sa_flags;
5837 act.sa_restorer = arg5;
5838 unlock_user_struct(rt_act, arg2, 0);
5839 pact = &act;
5840 }
5841 ret = get_errno(do_sigaction(arg1, pact, &oact));
5842 if (!is_error(ret) && arg3) {
5843 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5844 goto efault;
5845 rt_act->_sa_handler = oact._sa_handler;
5846 rt_act->sa_mask = oact.sa_mask;
5847 rt_act->sa_flags = oact.sa_flags;
5848 unlock_user_struct(rt_act, arg3, 1);
5849 }
5850 #else
5851 struct target_sigaction *act;
5852 struct target_sigaction *oact;
5853
5854 if (arg2) {
5855 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5856 goto efault;
5857 } else
5858 act = NULL;
5859 if (arg3) {
5860 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5861 ret = -TARGET_EFAULT;
5862 goto rt_sigaction_fail;
5863 }
5864 } else
5865 oact = NULL;
5866 ret = get_errno(do_sigaction(arg1, act, oact));
5867 rt_sigaction_fail:
5868 if (act)
5869 unlock_user_struct(act, arg2, 0);
5870 if (oact)
5871 unlock_user_struct(oact, arg3, 1);
5872 #endif
5873 }
5874 break;
5875 #ifdef TARGET_NR_sgetmask /* not on alpha */
5876 case TARGET_NR_sgetmask:
5877 {
5878 sigset_t cur_set;
5879 abi_ulong target_set;
5880 sigprocmask(0, NULL, &cur_set);
5881 host_to_target_old_sigset(&target_set, &cur_set);
5882 ret = target_set;
5883 }
5884 break;
5885 #endif
5886 #ifdef TARGET_NR_ssetmask /* not on alpha */
5887 case TARGET_NR_ssetmask:
5888 {
5889 sigset_t set, oset, cur_set;
5890 abi_ulong target_set = arg1;
5891 sigprocmask(0, NULL, &cur_set);
5892 target_to_host_old_sigset(&set, &target_set);
5893 sigorset(&set, &set, &cur_set);
5894 sigprocmask(SIG_SETMASK, &set, &oset);
5895 host_to_target_old_sigset(&target_set, &oset);
5896 ret = target_set;
5897 }
5898 break;
5899 #endif
5900 #ifdef TARGET_NR_sigprocmask
5901 case TARGET_NR_sigprocmask:
5902 {
5903 #if defined(TARGET_ALPHA)
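        /* On Alpha the old sigprocmask takes the new mask by value in arg2 and
           returns the previous mask directly, so no guest memory is touched. */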
5904 sigset_t set, oldset;
5905 abi_ulong mask;
5906 int how;
5907
5908 switch (arg1) {
5909 case TARGET_SIG_BLOCK:
5910 how = SIG_BLOCK;
5911 break;
5912 case TARGET_SIG_UNBLOCK:
5913 how = SIG_UNBLOCK;
5914 break;
5915 case TARGET_SIG_SETMASK:
5916 how = SIG_SETMASK;
5917 break;
5918 default:
5919 ret = -TARGET_EINVAL;
5920 goto fail;
5921 }
5922 mask = arg2;
5923 target_to_host_old_sigset(&set, &mask);
5924
5925 ret = get_errno(sigprocmask(how, &set, &oldset));
5926 if (!is_error(ret)) {
5927 host_to_target_old_sigset(&mask, &oldset);
5928 ret = mask;
5929 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5930 }
5931 #else
5932 sigset_t set, oldset, *set_ptr;
5933 int how;
5934
5935 if (arg2) {
5936 switch (arg1) {
5937 case TARGET_SIG_BLOCK:
5938 how = SIG_BLOCK;
5939 break;
5940 case TARGET_SIG_UNBLOCK:
5941 how = SIG_UNBLOCK;
5942 break;
5943 case TARGET_SIG_SETMASK:
5944 how = SIG_SETMASK;
5945 break;
5946 default:
5947 ret = -TARGET_EINVAL;
5948 goto fail;
5949 }
5950 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5951 goto efault;
5952 target_to_host_old_sigset(&set, p);
5953 unlock_user(p, arg2, 0);
5954 set_ptr = &set;
5955 } else {
5956 how = 0;
5957 set_ptr = NULL;
5958 }
5959 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5960 if (!is_error(ret) && arg3) {
5961 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5962 goto efault;
5963 host_to_target_old_sigset(p, &oldset);
5964 unlock_user(p, arg3, sizeof(target_sigset_t));
5965 }
5966 #endif
5967 }
5968 break;
5969 #endif
5970 case TARGET_NR_rt_sigprocmask:
5971 {
5972 int how = arg1;
5973 sigset_t set, oldset, *set_ptr;
5974
5975 if (arg2) {
5976 switch(how) {
5977 case TARGET_SIG_BLOCK:
5978 how = SIG_BLOCK;
5979 break;
5980 case TARGET_SIG_UNBLOCK:
5981 how = SIG_UNBLOCK;
5982 break;
5983 case TARGET_SIG_SETMASK:
5984 how = SIG_SETMASK;
5985 break;
5986 default:
5987 ret = -TARGET_EINVAL;
5988 goto fail;
5989 }
5990 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5991 goto efault;
5992 target_to_host_sigset(&set, p);
5993 unlock_user(p, arg2, 0);
5994 set_ptr = &set;
5995 } else {
5996 how = 0;
5997 set_ptr = NULL;
5998 }
5999 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6000 if (!is_error(ret) && arg3) {
6001 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6002 goto efault;
6003 host_to_target_sigset(p, &oldset);
6004 unlock_user(p, arg3, sizeof(target_sigset_t));
6005 }
6006 }
6007 break;
6008 #ifdef TARGET_NR_sigpending
6009 case TARGET_NR_sigpending:
6010 {
6011 sigset_t set;
6012 ret = get_errno(sigpending(&set));
6013 if (!is_error(ret)) {
6014 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6015 goto efault;
6016 host_to_target_old_sigset(p, &set);
6017 unlock_user(p, arg1, sizeof(target_sigset_t));
6018 }
6019 }
6020 break;
6021 #endif
6022 case TARGET_NR_rt_sigpending:
6023 {
6024 sigset_t set;
6025 ret = get_errno(sigpending(&set));
6026 if (!is_error(ret)) {
6027 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6028 goto efault;
6029 host_to_target_sigset(p, &set);
6030 unlock_user(p, arg1, sizeof(target_sigset_t));
6031 }
6032 }
6033 break;
6034 #ifdef TARGET_NR_sigsuspend
6035 case TARGET_NR_sigsuspend:
6036 {
6037 sigset_t set;
6038 #if defined(TARGET_ALPHA)
6039 abi_ulong mask = arg1;
6040 target_to_host_old_sigset(&set, &mask);
6041 #else
6042 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6043 goto efault;
6044 target_to_host_old_sigset(&set, p);
6045 unlock_user(p, arg1, 0);
6046 #endif
6047 ret = get_errno(sigsuspend(&set));
6048 }
6049 break;
6050 #endif
6051 case TARGET_NR_rt_sigsuspend:
6052 {
6053 sigset_t set;
6054 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6055 goto efault;
6056 target_to_host_sigset(&set, p);
6057 unlock_user(p, arg1, 0);
6058 ret = get_errno(sigsuspend(&set));
6059 }
6060 break;
6061 case TARGET_NR_rt_sigtimedwait:
6062 {
6063 sigset_t set;
6064 struct timespec uts, *puts;
6065 siginfo_t uinfo;
6066
6067 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6068 goto efault;
6069 target_to_host_sigset(&set, p);
6070 unlock_user(p, arg1, 0);
6071 if (arg3) {
6072 puts = &uts;
6073 target_to_host_timespec(puts, arg3);
6074 } else {
6075 puts = NULL;
6076 }
6077 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6078 if (!is_error(ret) && arg2) {
6079 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6080 goto efault;
6081 host_to_target_siginfo(p, &uinfo);
6082 unlock_user(p, arg2, sizeof(target_siginfo_t));
6083 }
6084 }
6085 break;
6086 case TARGET_NR_rt_sigqueueinfo:
6087 {
6088 siginfo_t uinfo;
6089             if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6090 goto efault;
6091 target_to_host_siginfo(&uinfo, p);
6092             unlock_user(p, arg3, 0);
6093 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6094 }
6095 break;
6096 #ifdef TARGET_NR_sigreturn
6097 case TARGET_NR_sigreturn:
6098         /* NOTE: ret is eax, so no transcoding needs to be done */
6099 ret = do_sigreturn(cpu_env);
6100 break;
6101 #endif
6102 case TARGET_NR_rt_sigreturn:
6103         /* NOTE: ret is eax, so no transcoding needs to be done */
6104 ret = do_rt_sigreturn(cpu_env);
6105 break;
6106 case TARGET_NR_sethostname:
6107 if (!(p = lock_user_string(arg1)))
6108 goto efault;
6109 ret = get_errno(sethostname(p, arg2));
6110 unlock_user(p, arg1, 0);
6111 break;
6112 case TARGET_NR_setrlimit:
6113 {
6114 int resource = target_to_host_resource(arg1);
6115 struct target_rlimit *target_rlim;
6116 struct rlimit rlim;
6117 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6118 goto efault;
6119 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6120 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6121 unlock_user_struct(target_rlim, arg2, 0);
6122 ret = get_errno(setrlimit(resource, &rlim));
6123 }
6124 break;
6125 case TARGET_NR_getrlimit:
6126 {
6127 int resource = target_to_host_resource(arg1);
6128 struct target_rlimit *target_rlim;
6129 struct rlimit rlim;
6130
6131 ret = get_errno(getrlimit(resource, &rlim));
6132 if (!is_error(ret)) {
6133 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6134 goto efault;
6135 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6136 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6137 unlock_user_struct(target_rlim, arg2, 1);
6138 }
6139 }
6140 break;
6141 case TARGET_NR_getrusage:
6142 {
6143 struct rusage rusage;
6144 ret = get_errno(getrusage(arg1, &rusage));
6145 if (!is_error(ret)) {
6146 host_to_target_rusage(arg2, &rusage);
6147 }
6148 }
6149 break;
6150 case TARGET_NR_gettimeofday:
6151 {
6152 struct timeval tv;
6153 ret = get_errno(gettimeofday(&tv, NULL));
6154 if (!is_error(ret)) {
6155 if (copy_to_user_timeval(arg1, &tv))
6156 goto efault;
6157 }
6158 }
6159 break;
6160 case TARGET_NR_settimeofday:
6161 {
6162 struct timeval tv;
6163 if (copy_from_user_timeval(&tv, arg1))
6164 goto efault;
6165 ret = get_errno(settimeofday(&tv, NULL));
6166 }
6167 break;
6168 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
6169 case TARGET_NR_select:
6170 {
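        /* The old select call passes a single struct holding all five
           arguments, which is unpacked here before calling do_select(). */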
6171 struct target_sel_arg_struct *sel;
6172 abi_ulong inp, outp, exp, tvp;
6173 long nsel;
6174
6175 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6176 goto efault;
6177 nsel = tswapal(sel->n);
6178 inp = tswapal(sel->inp);
6179 outp = tswapal(sel->outp);
6180 exp = tswapal(sel->exp);
6181 tvp = tswapal(sel->tvp);
6182 unlock_user_struct(sel, arg1, 0);
6183 ret = do_select(nsel, inp, outp, exp, tvp);
6184 }
6185 break;
6186 #endif
6187 #ifdef TARGET_NR_pselect6
6188 case TARGET_NR_pselect6:
6189 {
6190 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6191 fd_set rfds, wfds, efds;
6192 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6193 struct timespec ts, *ts_ptr;
6194
6195 /*
6196 * The 6th arg is actually two args smashed together,
6197 * so we cannot use the C library.
6198 */
6199 sigset_t set;
6200 struct {
6201 sigset_t *set;
6202 size_t size;
6203 } sig, *sig_ptr;
6204
6205 abi_ulong arg_sigset, arg_sigsize, *arg7;
6206 target_sigset_t *target_sigset;
6207
6208 n = arg1;
6209 rfd_addr = arg2;
6210 wfd_addr = arg3;
6211 efd_addr = arg4;
6212 ts_addr = arg5;
6213
6214 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6215 if (ret) {
6216 goto fail;
6217 }
6218 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6219 if (ret) {
6220 goto fail;
6221 }
6222 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6223 if (ret) {
6224 goto fail;
6225 }
6226
6227 /*
6228 * This takes a timespec, and not a timeval, so we cannot
6229 * use the do_select() helper ...
6230 */
6231 if (ts_addr) {
6232 if (target_to_host_timespec(&ts, ts_addr)) {
6233 goto efault;
6234 }
6235 ts_ptr = &ts;
6236 } else {
6237 ts_ptr = NULL;
6238 }
6239
6240 /* Extract the two packed args for the sigset */
6241 if (arg6) {
6242 sig_ptr = &sig;
6243 sig.size = _NSIG / 8;
6244
6245 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6246 if (!arg7) {
6247 goto efault;
6248 }
6249 arg_sigset = tswapal(arg7[0]);
6250 arg_sigsize = tswapal(arg7[1]);
6251 unlock_user(arg7, arg6, 0);
6252
6253 if (arg_sigset) {
6254 sig.set = &set;
6255 if (arg_sigsize != sizeof(*target_sigset)) {
6256 /* Like the kernel, we enforce correct size sigsets */
6257 ret = -TARGET_EINVAL;
6258 goto fail;
6259 }
6260 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6261 sizeof(*target_sigset), 1);
6262 if (!target_sigset) {
6263 goto efault;
6264 }
6265 target_to_host_sigset(&set, target_sigset);
6266 unlock_user(target_sigset, arg_sigset, 0);
6267 } else {
6268 sig.set = NULL;
6269 }
6270 } else {
6271 sig_ptr = NULL;
6272 }
6273
6274 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6275 ts_ptr, sig_ptr));
6276
6277 if (!is_error(ret)) {
6278 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6279 goto efault;
6280 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6281 goto efault;
6282 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6283 goto efault;
6284
6285 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6286 goto efault;
6287 }
6288 }
6289 break;
6290 #endif
6291 case TARGET_NR_symlink:
6292 {
6293 void *p2;
6294 p = lock_user_string(arg1);
6295 p2 = lock_user_string(arg2);
6296 if (!p || !p2)
6297 ret = -TARGET_EFAULT;
6298 else
6299 ret = get_errno(symlink(p, p2));
6300 unlock_user(p2, arg2, 0);
6301 unlock_user(p, arg1, 0);
6302 }
6303 break;
6304 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6305 case TARGET_NR_symlinkat:
6306 {
6307 void *p2;
6308 p = lock_user_string(arg1);
6309 p2 = lock_user_string(arg3);
6310 if (!p || !p2)
6311 ret = -TARGET_EFAULT;
6312 else
6313 ret = get_errno(sys_symlinkat(p, arg2, p2));
6314 unlock_user(p2, arg3, 0);
6315 unlock_user(p, arg1, 0);
6316 }
6317 break;
6318 #endif
6319 #ifdef TARGET_NR_oldlstat
6320 case TARGET_NR_oldlstat:
6321 goto unimplemented;
6322 #endif
6323 case TARGET_NR_readlink:
6324 {
6325 void *p2, *temp;
6326 p = lock_user_string(arg1);
6327 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6328 if (!p || !p2)
6329 ret = -TARGET_EFAULT;
6330 else {
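                /* Intercept /proc/self/exe so the guest sees the path of the
                   binary being emulated rather than the path of QEMU itself. */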
6331 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
6332 char real[PATH_MAX];
6333                 temp = realpath(exec_path, real);
6334                 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
6335 snprintf((char *)p2, arg3, "%s", real);
6336 }
6337 else
6338 ret = get_errno(readlink(path(p), p2, arg3));
6339 }
6340 unlock_user(p2, arg2, ret);
6341 unlock_user(p, arg1, 0);
6342 }
6343 break;
6344 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6345 case TARGET_NR_readlinkat:
6346 {
6347 void *p2;
6348 p = lock_user_string(arg2);
6349 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6350 if (!p || !p2)
6351 ret = -TARGET_EFAULT;
6352 else
6353 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6354 unlock_user(p2, arg3, ret);
6355 unlock_user(p, arg2, 0);
6356 }
6357 break;
6358 #endif
6359 #ifdef TARGET_NR_uselib
6360 case TARGET_NR_uselib:
6361 goto unimplemented;
6362 #endif
6363 #ifdef TARGET_NR_swapon
6364 case TARGET_NR_swapon:
6365 if (!(p = lock_user_string(arg1)))
6366 goto efault;
6367 ret = get_errno(swapon(p, arg2));
6368 unlock_user(p, arg1, 0);
6369 break;
6370 #endif
6371 case TARGET_NR_reboot:
6372 if (!(p = lock_user_string(arg4)))
6373 goto efault;
6374         ret = get_errno(reboot(arg1, arg2, arg3, p));
6375 unlock_user(p, arg4, 0);
6376 break;
6377 #ifdef TARGET_NR_readdir
6378 case TARGET_NR_readdir:
6379 goto unimplemented;
6380 #endif
6381 #ifdef TARGET_NR_mmap
6382 case TARGET_NR_mmap:
6383 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6384 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6385 || defined(TARGET_S390X)
6386 {
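        /* On these targets the old mmap syscall takes a single pointer to a
           block of six arguments in guest memory, so unpack them first. */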
6387 abi_ulong *v;
6388 abi_ulong v1, v2, v3, v4, v5, v6;
6389 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6390 goto efault;
6391 v1 = tswapal(v[0]);
6392 v2 = tswapal(v[1]);
6393 v3 = tswapal(v[2]);
6394 v4 = tswapal(v[3]);
6395 v5 = tswapal(v[4]);
6396 v6 = tswapal(v[5]);
6397 unlock_user(v, arg1, 0);
6398 ret = get_errno(target_mmap(v1, v2, v3,
6399 target_to_host_bitmask(v4, mmap_flags_tbl),
6400 v5, v6));
6401 }
6402 #else
6403 ret = get_errno(target_mmap(arg1, arg2, arg3,
6404 target_to_host_bitmask(arg4, mmap_flags_tbl),
6405 arg5,
6406 arg6));
6407 #endif
6408 break;
6409 #endif
6410 #ifdef TARGET_NR_mmap2
6411 case TARGET_NR_mmap2:
6412 #ifndef MMAP_SHIFT
6413 #define MMAP_SHIFT 12
6414 #endif
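        /* mmap2 passes the file offset in pages (4096-byte units unless the
           target overrides MMAP_SHIFT), so shift it into a byte offset. */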
6415 ret = get_errno(target_mmap(arg1, arg2, arg3,
6416 target_to_host_bitmask(arg4, mmap_flags_tbl),
6417 arg5,
6418 arg6 << MMAP_SHIFT));
6419 break;
6420 #endif
6421 case TARGET_NR_munmap:
6422 ret = get_errno(target_munmap(arg1, arg2));
6423 break;
6424 case TARGET_NR_mprotect:
6425 {
6426 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6427 /* Special hack to detect libc making the stack executable. */
6428 if ((arg3 & PROT_GROWSDOWN)
6429 && arg1 >= ts->info->stack_limit
6430 && arg1 <= ts->info->start_stack) {
6431 arg3 &= ~PROT_GROWSDOWN;
6432 arg2 = arg2 + arg1 - ts->info->stack_limit;
6433 arg1 = ts->info->stack_limit;
6434 }
6435 }
6436 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6437 break;
6438 #ifdef TARGET_NR_mremap
6439 case TARGET_NR_mremap:
6440 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6441 break;
6442 #endif
6443 /* ??? msync/mlock/munlock are broken for softmmu. */
6444 #ifdef TARGET_NR_msync
6445 case TARGET_NR_msync:
6446 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6447 break;
6448 #endif
6449 #ifdef TARGET_NR_mlock
6450 case TARGET_NR_mlock:
6451 ret = get_errno(mlock(g2h(arg1), arg2));
6452 break;
6453 #endif
6454 #ifdef TARGET_NR_munlock
6455 case TARGET_NR_munlock:
6456 ret = get_errno(munlock(g2h(arg1), arg2));
6457 break;
6458 #endif
6459 #ifdef TARGET_NR_mlockall
6460 case TARGET_NR_mlockall:
6461 ret = get_errno(mlockall(arg1));
6462 break;
6463 #endif
6464 #ifdef TARGET_NR_munlockall
6465 case TARGET_NR_munlockall:
6466 ret = get_errno(munlockall());
6467 break;
6468 #endif
6469 case TARGET_NR_truncate:
6470 if (!(p = lock_user_string(arg1)))
6471 goto efault;
6472 ret = get_errno(truncate(p, arg2));
6473 unlock_user(p, arg1, 0);
6474 break;
6475 case TARGET_NR_ftruncate:
6476 ret = get_errno(ftruncate(arg1, arg2));
6477 break;
6478 case TARGET_NR_fchmod:
6479 ret = get_errno(fchmod(arg1, arg2));
6480 break;
6481 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6482 case TARGET_NR_fchmodat:
6483 if (!(p = lock_user_string(arg2)))
6484 goto efault;
6485 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6486 unlock_user(p, arg2, 0);
6487 break;
6488 #endif
6489 case TARGET_NR_getpriority:
6490 /* Note that negative values are valid for getpriority, so we must
6491 differentiate based on errno settings. */
6492 errno = 0;
6493 ret = getpriority(arg1, arg2);
6494 if (ret == -1 && errno != 0) {
6495 ret = -host_to_target_errno(errno);
6496 break;
6497 }
6498 #ifdef TARGET_ALPHA
6499 /* Return value is the unbiased priority. Signal no error. */
6500 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6501 #else
6502 /* Return value is a biased priority to avoid negative numbers. */
6503 ret = 20 - ret;
6504 #endif
6505 break;
6506 case TARGET_NR_setpriority:
6507 ret = get_errno(setpriority(arg1, arg2, arg3));
6508 break;
6509 #ifdef TARGET_NR_profil
6510 case TARGET_NR_profil:
6511 goto unimplemented;
6512 #endif
6513 case TARGET_NR_statfs:
6514 if (!(p = lock_user_string(arg1)))
6515 goto efault;
6516 ret = get_errno(statfs(path(p), &stfs));
6517 unlock_user(p, arg1, 0);
6518 convert_statfs:
6519 if (!is_error(ret)) {
6520 struct target_statfs *target_stfs;
6521
6522 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6523 goto efault;
6524 __put_user(stfs.f_type, &target_stfs->f_type);
6525 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6526 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6527 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6528 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6529 __put_user(stfs.f_files, &target_stfs->f_files);
6530 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6531 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6532 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6533 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6534 unlock_user_struct(target_stfs, arg2, 1);
6535 }
6536 break;
6537 case TARGET_NR_fstatfs:
6538 ret = get_errno(fstatfs(arg1, &stfs));
6539 goto convert_statfs;
6540 #ifdef TARGET_NR_statfs64
6541 case TARGET_NR_statfs64:
6542 if (!(p = lock_user_string(arg1)))
6543 goto efault;
6544 ret = get_errno(statfs(path(p), &stfs));
6545 unlock_user(p, arg1, 0);
6546 convert_statfs64:
6547 if (!is_error(ret)) {
6548 struct target_statfs64 *target_stfs;
6549
6550 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6551 goto efault;
6552 __put_user(stfs.f_type, &target_stfs->f_type);
6553 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6554 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6555 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6556 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6557 __put_user(stfs.f_files, &target_stfs->f_files);
6558 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6559 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6560 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6561 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6562 unlock_user_struct(target_stfs, arg3, 1);
6563 }
6564 break;
6565 case TARGET_NR_fstatfs64:
6566 ret = get_errno(fstatfs(arg1, &stfs));
6567 goto convert_statfs64;
6568 #endif
6569 #ifdef TARGET_NR_ioperm
6570 case TARGET_NR_ioperm:
6571 goto unimplemented;
6572 #endif
6573 #ifdef TARGET_NR_socketcall
6574 case TARGET_NR_socketcall:
6575 ret = do_socketcall(arg1, arg2);
6576 break;
6577 #endif
6578 #ifdef TARGET_NR_accept
6579 case TARGET_NR_accept:
6580 ret = do_accept(arg1, arg2, arg3);
6581 break;
6582 #endif
6583 #ifdef TARGET_NR_bind
6584 case TARGET_NR_bind:
6585 ret = do_bind(arg1, arg2, arg3);
6586 break;
6587 #endif
6588 #ifdef TARGET_NR_connect
6589 case TARGET_NR_connect:
6590 ret = do_connect(arg1, arg2, arg3);
6591 break;
6592 #endif
6593 #ifdef TARGET_NR_getpeername
6594 case TARGET_NR_getpeername:
6595 ret = do_getpeername(arg1, arg2, arg3);
6596 break;
6597 #endif
6598 #ifdef TARGET_NR_getsockname
6599 case TARGET_NR_getsockname:
6600 ret = do_getsockname(arg1, arg2, arg3);
6601 break;
6602 #endif
6603 #ifdef TARGET_NR_getsockopt
6604 case TARGET_NR_getsockopt:
6605 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6606 break;
6607 #endif
6608 #ifdef TARGET_NR_listen
6609 case TARGET_NR_listen:
6610 ret = get_errno(listen(arg1, arg2));
6611 break;
6612 #endif
6613 #ifdef TARGET_NR_recv
6614 case TARGET_NR_recv:
6615 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6616 break;
6617 #endif
6618 #ifdef TARGET_NR_recvfrom
6619 case TARGET_NR_recvfrom:
6620 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6621 break;
6622 #endif
6623 #ifdef TARGET_NR_recvmsg
6624 case TARGET_NR_recvmsg:
6625 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6626 break;
6627 #endif
6628 #ifdef TARGET_NR_send
6629 case TARGET_NR_send:
6630 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6631 break;
6632 #endif
6633 #ifdef TARGET_NR_sendmsg
6634 case TARGET_NR_sendmsg:
6635 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6636 break;
6637 #endif
6638 #ifdef TARGET_NR_sendto
6639 case TARGET_NR_sendto:
6640 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6641 break;
6642 #endif
6643 #ifdef TARGET_NR_shutdown
6644 case TARGET_NR_shutdown:
6645 ret = get_errno(shutdown(arg1, arg2));
6646 break;
6647 #endif
6648 #ifdef TARGET_NR_socket
6649 case TARGET_NR_socket:
6650 ret = do_socket(arg1, arg2, arg3);
6651 break;
6652 #endif
6653 #ifdef TARGET_NR_socketpair
6654 case TARGET_NR_socketpair:
6655 ret = do_socketpair(arg1, arg2, arg3, arg4);
6656 break;
6657 #endif
6658 #ifdef TARGET_NR_setsockopt
6659 case TARGET_NR_setsockopt:
6660 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6661 break;
6662 #endif
6663
6664 case TARGET_NR_syslog:
6665 if (!(p = lock_user_string(arg2)))
6666 goto efault;
6667 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6668 unlock_user(p, arg2, 0);
6669 break;
6670
6671 case TARGET_NR_setitimer:
6672 {
6673 struct itimerval value, ovalue, *pvalue;
6674
6675 if (arg2) {
6676 pvalue = &value;
6677 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6678 || copy_from_user_timeval(&pvalue->it_value,
6679 arg2 + sizeof(struct target_timeval)))
6680 goto efault;
6681 } else {
6682 pvalue = NULL;
6683 }
6684 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6685 if (!is_error(ret) && arg3) {
6686 if (copy_to_user_timeval(arg3,
6687 &ovalue.it_interval)
6688 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6689 &ovalue.it_value))
6690 goto efault;
6691 }
6692 }
6693 break;
6694 case TARGET_NR_getitimer:
6695 {
6696 struct itimerval value;
6697
6698 ret = get_errno(getitimer(arg1, &value));
6699 if (!is_error(ret) && arg2) {
6700 if (copy_to_user_timeval(arg2,
6701 &value.it_interval)
6702 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6703 &value.it_value))
6704 goto efault;
6705 }
6706 }
6707 break;
6708 case TARGET_NR_stat:
6709 if (!(p = lock_user_string(arg1)))
6710 goto efault;
6711 ret = get_errno(stat(path(p), &st));
6712 unlock_user(p, arg1, 0);
6713 goto do_stat;
6714 case TARGET_NR_lstat:
6715 if (!(p = lock_user_string(arg1)))
6716 goto efault;
6717 ret = get_errno(lstat(path(p), &st));
6718 unlock_user(p, arg1, 0);
6719 goto do_stat;
6720 case TARGET_NR_fstat:
6721 {
6722 ret = get_errno(fstat(arg1, &st));
6723 do_stat:
6724 if (!is_error(ret)) {
6725 struct target_stat *target_st;
6726
6727 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6728 goto efault;
6729 memset(target_st, 0, sizeof(*target_st));
6730 __put_user(st.st_dev, &target_st->st_dev);
6731 __put_user(st.st_ino, &target_st->st_ino);
6732 __put_user(st.st_mode, &target_st->st_mode);
6733 __put_user(st.st_uid, &target_st->st_uid);
6734 __put_user(st.st_gid, &target_st->st_gid);
6735 __put_user(st.st_nlink, &target_st->st_nlink);
6736 __put_user(st.st_rdev, &target_st->st_rdev);
6737 __put_user(st.st_size, &target_st->st_size);
6738 __put_user(st.st_blksize, &target_st->st_blksize);
6739 __put_user(st.st_blocks, &target_st->st_blocks);
6740 __put_user(st.st_atime, &target_st->target_st_atime);
6741 __put_user(st.st_mtime, &target_st->target_st_mtime);
6742 __put_user(st.st_ctime, &target_st->target_st_ctime);
6743 unlock_user_struct(target_st, arg2, 1);
6744 }
6745 }
6746 break;
6747 #ifdef TARGET_NR_olduname
6748 case TARGET_NR_olduname:
6749 goto unimplemented;
6750 #endif
6751 #ifdef TARGET_NR_iopl
6752 case TARGET_NR_iopl:
6753 goto unimplemented;
6754 #endif
6755 case TARGET_NR_vhangup:
6756 ret = get_errno(vhangup());
6757 break;
6758 #ifdef TARGET_NR_idle
6759 case TARGET_NR_idle:
6760 goto unimplemented;
6761 #endif
6762 #ifdef TARGET_NR_syscall
6763 case TARGET_NR_syscall:
6764 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6765 arg6, arg7, arg8, 0);
6766 break;
6767 #endif
6768 case TARGET_NR_wait4:
6769 {
6770 int status;
6771 abi_long status_ptr = arg2;
6772 struct rusage rusage, *rusage_ptr;
6773 abi_ulong target_rusage = arg4;
6774 if (target_rusage)
6775 rusage_ptr = &rusage;
6776 else
6777 rusage_ptr = NULL;
6778 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6779 if (!is_error(ret)) {
6780 if (status_ptr && ret) {
6781 status = host_to_target_waitstatus(status);
6782 if (put_user_s32(status, status_ptr))
6783 goto efault;
6784 }
6785 if (target_rusage)
6786 host_to_target_rusage(target_rusage, &rusage);
6787 }
6788 }
6789 break;
6790 #ifdef TARGET_NR_swapoff
6791 case TARGET_NR_swapoff:
6792 if (!(p = lock_user_string(arg1)))
6793 goto efault;
6794 ret = get_errno(swapoff(p));
6795 unlock_user(p, arg1, 0);
6796 break;
6797 #endif
6798 case TARGET_NR_sysinfo:
6799 {
6800 struct target_sysinfo *target_value;
6801 struct sysinfo value;
6802 ret = get_errno(sysinfo(&value));
6803 if (!is_error(ret) && arg1)
6804 {
6805 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6806 goto efault;
6807 __put_user(value.uptime, &target_value->uptime);
6808 __put_user(value.loads[0], &target_value->loads[0]);
6809 __put_user(value.loads[1], &target_value->loads[1]);
6810 __put_user(value.loads[2], &target_value->loads[2]);
6811 __put_user(value.totalram, &target_value->totalram);
6812 __put_user(value.freeram, &target_value->freeram);
6813 __put_user(value.sharedram, &target_value->sharedram);
6814 __put_user(value.bufferram, &target_value->bufferram);
6815 __put_user(value.totalswap, &target_value->totalswap);
6816 __put_user(value.freeswap, &target_value->freeswap);
6817 __put_user(value.procs, &target_value->procs);
6818 __put_user(value.totalhigh, &target_value->totalhigh);
6819 __put_user(value.freehigh, &target_value->freehigh);
6820 __put_user(value.mem_unit, &target_value->mem_unit);
6821 unlock_user_struct(target_value, arg1, 1);
6822 }
6823 }
6824 break;
6825 #ifdef TARGET_NR_ipc
6826 case TARGET_NR_ipc:
6827 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6828 break;
6829 #endif
6830 #ifdef TARGET_NR_semget
6831 case TARGET_NR_semget:
6832 ret = get_errno(semget(arg1, arg2, arg3));
6833 break;
6834 #endif
6835 #ifdef TARGET_NR_semop
6836 case TARGET_NR_semop:
6837 ret = get_errno(do_semop(arg1, arg2, arg3));
6838 break;
6839 #endif
6840 #ifdef TARGET_NR_semctl
6841 case TARGET_NR_semctl:
6842 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6843 break;
6844 #endif
6845 #ifdef TARGET_NR_msgctl
6846 case TARGET_NR_msgctl:
6847 ret = do_msgctl(arg1, arg2, arg3);
6848 break;
6849 #endif
6850 #ifdef TARGET_NR_msgget
6851 case TARGET_NR_msgget:
6852 ret = get_errno(msgget(arg1, arg2));
6853 break;
6854 #endif
6855 #ifdef TARGET_NR_msgrcv
6856 case TARGET_NR_msgrcv:
6857 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6858 break;
6859 #endif
6860 #ifdef TARGET_NR_msgsnd
6861 case TARGET_NR_msgsnd:
6862 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6863 break;
6864 #endif
6865 #ifdef TARGET_NR_shmget
6866 case TARGET_NR_shmget:
6867 ret = get_errno(shmget(arg1, arg2, arg3));
6868 break;
6869 #endif
6870 #ifdef TARGET_NR_shmctl
6871 case TARGET_NR_shmctl:
6872 ret = do_shmctl(arg1, arg2, arg3);
6873 break;
6874 #endif
6875 #ifdef TARGET_NR_shmat
6876 case TARGET_NR_shmat:
6877 ret = do_shmat(arg1, arg2, arg3);
6878 break;
6879 #endif
6880 #ifdef TARGET_NR_shmdt
6881 case TARGET_NR_shmdt:
6882 ret = do_shmdt(arg1);
6883 break;
6884 #endif
6885 case TARGET_NR_fsync:
6886 ret = get_errno(fsync(arg1));
6887 break;
6888 case TARGET_NR_clone:
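        /* The guest architectures disagree on the ordering of the clone()
           arguments, so reshuffle them to match do_fork()'s parameter order. */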
6889 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6890 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6891 #elif defined(TARGET_CRIS)
6892 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6893 #elif defined(TARGET_S390X)
6894 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6895 #else
6896 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6897 #endif
6898 break;
6899 #ifdef __NR_exit_group
6900 /* new thread calls */
6901 case TARGET_NR_exit_group:
6902 #ifdef TARGET_GPROF
6903 _mcleanup();
6904 #endif
6905 gdb_exit(cpu_env, arg1);
6906 ret = get_errno(exit_group(arg1));
6907 break;
6908 #endif
6909 case TARGET_NR_setdomainname:
6910 if (!(p = lock_user_string(arg1)))
6911 goto efault;
6912 ret = get_errno(setdomainname(p, arg2));
6913 unlock_user(p, arg1, 0);
6914 break;
6915 case TARGET_NR_uname:
6916 /* no need to transcode because we use the linux syscall */
6917 {
6918 struct new_utsname * buf;
6919
6920 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6921 goto efault;
6922 ret = get_errno(sys_uname(buf));
6923 if (!is_error(ret)) {
6924             /* Overwrite the native machine name with whatever is being
6925 emulated. */
6926 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6927 /* Allow the user to override the reported release. */
6928 if (qemu_uname_release && *qemu_uname_release)
6929 strcpy (buf->release, qemu_uname_release);
6930 }
6931 unlock_user_struct(buf, arg1, 1);
6932 }
6933 break;
6934 #ifdef TARGET_I386
6935 case TARGET_NR_modify_ldt:
6936 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6937 break;
6938 #if !defined(TARGET_X86_64)
6939 case TARGET_NR_vm86old:
6940 goto unimplemented;
6941 case TARGET_NR_vm86:
6942 ret = do_vm86(cpu_env, arg1, arg2);
6943 break;
6944 #endif
6945 #endif
6946 case TARGET_NR_adjtimex:
6947 goto unimplemented;
6948 #ifdef TARGET_NR_create_module
6949 case TARGET_NR_create_module:
6950 #endif
6951 case TARGET_NR_init_module:
6952 case TARGET_NR_delete_module:
6953 #ifdef TARGET_NR_get_kernel_syms
6954 case TARGET_NR_get_kernel_syms:
6955 #endif
6956 goto unimplemented;
6957 case TARGET_NR_quotactl:
6958 goto unimplemented;
6959 case TARGET_NR_getpgid:
6960 ret = get_errno(getpgid(arg1));
6961 break;
6962 case TARGET_NR_fchdir:
6963 ret = get_errno(fchdir(arg1));
6964 break;
6965 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6966 case TARGET_NR_bdflush:
6967 goto unimplemented;
6968 #endif
6969 #ifdef TARGET_NR_sysfs
6970 case TARGET_NR_sysfs:
6971 goto unimplemented;
6972 #endif
6973 case TARGET_NR_personality:
6974 ret = get_errno(personality(arg1));
6975 break;
6976 #ifdef TARGET_NR_afs_syscall
6977 case TARGET_NR_afs_syscall:
6978 goto unimplemented;
6979 #endif
6980 #ifdef TARGET_NR__llseek /* Not on alpha */
6981 case TARGET_NR__llseek:
6982 {
6983 int64_t res;
6984 #if !defined(__NR_llseek)
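        /* Hosts without an _llseek syscall (typically 64-bit hosts) emulate it
           with a plain lseek() on the combined 64-bit offset. */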
6985 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6986 if (res == -1) {
6987 ret = get_errno(res);
6988 } else {
6989 ret = 0;
6990 }
6991 #else
6992 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6993 #endif
6994 if ((ret == 0) && put_user_s64(res, arg4)) {
6995 goto efault;
6996 }
6997 }
6998 break;
6999 #endif
7000 case TARGET_NR_getdents:
7001 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7002 {
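        /* Host and target dirent layouts differ here (64-bit vs 32-bit
           d_ino/d_off), so each record is repacked with a recalculated
           record length. */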
7003 struct target_dirent *target_dirp;
7004 struct linux_dirent *dirp;
7005 abi_long count = arg3;
7006
7007 dirp = malloc(count);
7008 if (!dirp) {
7009 ret = -TARGET_ENOMEM;
7010 goto fail;
7011 }
7012
7013 ret = get_errno(sys_getdents(arg1, dirp, count));
7014 if (!is_error(ret)) {
7015 struct linux_dirent *de;
7016 struct target_dirent *tde;
7017 int len = ret;
7018 int reclen, treclen;
7019 int count1, tnamelen;
7020
7021 count1 = 0;
7022 de = dirp;
7023 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7024 goto efault;
7025 tde = target_dirp;
7026 while (len > 0) {
7027 reclen = de->d_reclen;
7028 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7029 assert(tnamelen >= 0);
7030 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7031 assert(count1 + treclen <= count);
7032 tde->d_reclen = tswap16(treclen);
7033 tde->d_ino = tswapal(de->d_ino);
7034 tde->d_off = tswapal(de->d_off);
7035 memcpy(tde->d_name, de->d_name, tnamelen);
7036 de = (struct linux_dirent *)((char *)de + reclen);
7037 len -= reclen;
7038 tde = (struct target_dirent *)((char *)tde + treclen);
7039 count1 += treclen;
7040 }
7041 ret = count1;
7042 unlock_user(target_dirp, arg2, ret);
7043 }
7044 free(dirp);
7045 }
7046 #else
7047 {
7048 struct linux_dirent *dirp;
7049 abi_long count = arg3;
7050
7051 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7052 goto efault;
7053 ret = get_errno(sys_getdents(arg1, dirp, count));
7054 if (!is_error(ret)) {
7055 struct linux_dirent *de;
7056 int len = ret;
7057 int reclen;
7058 de = dirp;
7059 while (len > 0) {
7060 reclen = de->d_reclen;
7061 if (reclen > len)
7062 break;
7063 de->d_reclen = tswap16(reclen);
7064 tswapls(&de->d_ino);
7065 tswapls(&de->d_off);
7066 de = (struct linux_dirent *)((char *)de + reclen);
7067 len -= reclen;
7068 }
7069 }
7070 unlock_user(dirp, arg2, ret);
7071 }
7072 #endif
7073 break;
7074 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7075 case TARGET_NR_getdents64:
7076 {
7077 struct linux_dirent64 *dirp;
7078 abi_long count = arg3;
7079 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7080 goto efault;
7081 ret = get_errno(sys_getdents64(arg1, dirp, count));
7082 if (!is_error(ret)) {
7083 struct linux_dirent64 *de;
7084 int len = ret;
7085 int reclen;
7086 de = dirp;
7087 while (len > 0) {
7088 reclen = de->d_reclen;
7089 if (reclen > len)
7090 break;
7091 de->d_reclen = tswap16(reclen);
7092 tswap64s((uint64_t *)&de->d_ino);
7093 tswap64s((uint64_t *)&de->d_off);
7094 de = (struct linux_dirent64 *)((char *)de + reclen);
7095 len -= reclen;
7096 }
7097 }
7098 unlock_user(dirp, arg2, ret);
7099 }
7100 break;
7101 #endif /* TARGET_NR_getdents64 */
7102 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
7103 #ifdef TARGET_S390X
7104 case TARGET_NR_select:
7105 #else
7106 case TARGET_NR__newselect:
7107 #endif
7108 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7109 break;
7110 #endif
7111 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7112 # ifdef TARGET_NR_poll
7113 case TARGET_NR_poll:
7114 # endif
7115 # ifdef TARGET_NR_ppoll
7116 case TARGET_NR_ppoll:
7117 # endif
7118 {
7119 struct target_pollfd *target_pfd;
7120 unsigned int nfds = arg2;
7121 int timeout = arg3;
7122 struct pollfd *pfd;
7123 unsigned int i;
7124
7125 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7126 if (!target_pfd)
7127 goto efault;
7128
7129 pfd = alloca(sizeof(struct pollfd) * nfds);
7130 for(i = 0; i < nfds; i++) {
7131 pfd[i].fd = tswap32(target_pfd[i].fd);
7132 pfd[i].events = tswap16(target_pfd[i].events);
7133 }
7134
7135 # ifdef TARGET_NR_ppoll
7136 if (num == TARGET_NR_ppoll) {
7137 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7138 target_sigset_t *target_set;
7139 sigset_t _set, *set = &_set;
7140
7141 if (arg3) {
7142 if (target_to_host_timespec(timeout_ts, arg3)) {
7143 unlock_user(target_pfd, arg1, 0);
7144 goto efault;
7145 }
7146 } else {
7147 timeout_ts = NULL;
7148 }
7149
7150 if (arg4) {
7151 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7152 if (!target_set) {
7153 unlock_user(target_pfd, arg1, 0);
7154 goto efault;
7155 }
7156 target_to_host_sigset(set, target_set);
7157 } else {
7158 set = NULL;
7159 }
7160
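            /* The kernel expects the size of the signal set in bytes as the
               final argument. */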
7161 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7162
7163 if (!is_error(ret) && arg3) {
7164 host_to_target_timespec(arg3, timeout_ts);
7165 }
7166 if (arg4) {
7167 unlock_user(target_set, arg4, 0);
7168 }
7169 } else
7170 # endif
7171 ret = get_errno(poll(pfd, nfds, timeout));
7172
7173 if (!is_error(ret)) {
7174 for(i = 0; i < nfds; i++) {
7175 target_pfd[i].revents = tswap16(pfd[i].revents);
7176 }
7177 }
7178 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7179 }
7180 break;
7181 #endif
7182 case TARGET_NR_flock:
7183 /* NOTE: the flock constant seems to be the same for every
7184 Linux platform */
7185 ret = get_errno(flock(arg1, arg2));
7186 break;
7187 case TARGET_NR_readv:
7188 {
7189 int count = arg3;
7190 struct iovec *vec;
7191
7192 vec = alloca(count * sizeof(struct iovec));
7193 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
7194 goto efault;
7195 ret = get_errno(readv(arg1, vec, count));
7196 unlock_iovec(vec, arg2, count, 1);
7197 }
7198 break;
7199 case TARGET_NR_writev:
7200 {
7201 int count = arg3;
7202 struct iovec *vec;
7203
7204 vec = alloca(count * sizeof(struct iovec));
7205 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7206 goto efault;
7207 ret = get_errno(writev(arg1, vec, count));
7208 unlock_iovec(vec, arg2, count, 0);
7209 }
7210 break;
7211 case TARGET_NR_getsid:
7212 ret = get_errno(getsid(arg1));
7213 break;
7214 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7215 case TARGET_NR_fdatasync:
7216 ret = get_errno(fdatasync(arg1));
7217 break;
7218 #endif
7219 case TARGET_NR__sysctl:
7220 /* We don't implement this, but ENOTDIR is always a safe
7221 return value. */
7222 ret = -TARGET_ENOTDIR;
7223 break;
7224 case TARGET_NR_sched_getaffinity:
7225 {
7226 unsigned int mask_size;
7227 unsigned long *mask;
7228
7229 /*
7230 * sched_getaffinity needs multiples of ulong, so need to take
7231 * care of mismatches between target ulong and host ulong sizes.
7232 */
7233 if (arg2 & (sizeof(abi_ulong) - 1)) {
7234 ret = -TARGET_EINVAL;
7235 break;
7236 }
7237 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7238
7239 mask = alloca(mask_size);
7240 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7241
7242 if (!is_error(ret)) {
7243 if (copy_to_user(arg3, mask, ret)) {
7244 goto efault;
7245 }
7246 }
7247 }
7248 break;
7249 case TARGET_NR_sched_setaffinity:
7250 {
7251 unsigned int mask_size;
7252 unsigned long *mask;
7253
7254 /*
7255 * sched_setaffinity needs multiples of ulong, so need to take
7256 * care of mismatches between target ulong and host ulong sizes.
7257 */
7258 if (arg2 & (sizeof(abi_ulong) - 1)) {
7259 ret = -TARGET_EINVAL;
7260 break;
7261 }
7262 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7263
7264 mask = alloca(mask_size);
7265 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7266 goto efault;
7267 }
7268 memcpy(mask, p, arg2);
7269             unlock_user_struct(p, arg3, 0);
7270
7271 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7272 }
7273 break;
7274 case TARGET_NR_sched_setparam:
7275 {
7276 struct sched_param *target_schp;
7277 struct sched_param schp;
7278
7279 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7280 goto efault;
7281 schp.sched_priority = tswap32(target_schp->sched_priority);
7282 unlock_user_struct(target_schp, arg2, 0);
7283 ret = get_errno(sched_setparam(arg1, &schp));
7284 }
7285 break;
7286 case TARGET_NR_sched_getparam:
7287 {
7288 struct sched_param *target_schp;
7289 struct sched_param schp;
7290 ret = get_errno(sched_getparam(arg1, &schp));
7291 if (!is_error(ret)) {
7292 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7293 goto efault;
7294 target_schp->sched_priority = tswap32(schp.sched_priority);
7295 unlock_user_struct(target_schp, arg2, 1);
7296 }
7297 }
7298 break;
7299 case TARGET_NR_sched_setscheduler:
7300 {
7301 struct sched_param *target_schp;
7302 struct sched_param schp;
7303 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7304 goto efault;
7305 schp.sched_priority = tswap32(target_schp->sched_priority);
7306 unlock_user_struct(target_schp, arg3, 0);
7307 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7308 }
7309 break;
7310 case TARGET_NR_sched_getscheduler:
7311 ret = get_errno(sched_getscheduler(arg1));
7312 break;
7313 case TARGET_NR_sched_yield:
7314 ret = get_errno(sched_yield());
7315 break;
7316 case TARGET_NR_sched_get_priority_max:
7317 ret = get_errno(sched_get_priority_max(arg1));
7318 break;
7319 case TARGET_NR_sched_get_priority_min:
7320 ret = get_errno(sched_get_priority_min(arg1));
7321 break;
7322 case TARGET_NR_sched_rr_get_interval:
7323 {
7324 struct timespec ts;
7325 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7326 if (!is_error(ret)) {
7327 host_to_target_timespec(arg2, &ts);
7328 }
7329 }
7330 break;
7331 case TARGET_NR_nanosleep:
7332 {
7333 struct timespec req, rem;
7334 target_to_host_timespec(&req, arg1);
7335 ret = get_errno(nanosleep(&req, &rem));
7336 if (is_error(ret) && arg2) {
7337 host_to_target_timespec(arg2, &rem);
7338 }
7339 }
7340 break;
7341 #ifdef TARGET_NR_query_module
7342 case TARGET_NR_query_module:
7343 goto unimplemented;
7344 #endif
7345 #ifdef TARGET_NR_nfsservctl
7346 case TARGET_NR_nfsservctl:
7347 goto unimplemented;
7348 #endif
7349 case TARGET_NR_prctl:
7350 switch (arg1) {
7351 case PR_GET_PDEATHSIG:
7352 {
7353 int deathsig;
7354 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7355 if (!is_error(ret) && arg2
7356 && put_user_ual(deathsig, arg2)) {
7357 goto efault;
7358 }
7359 break;
7360 }
7361 #ifdef PR_GET_NAME
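        /* The thread name buffer handled here is 16 bytes long
           (TASK_COMM_LEN in the kernel). */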
7362 case PR_GET_NAME:
7363 {
7364 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7365 if (!name) {
7366 goto efault;
7367 }
7368 ret = get_errno(prctl(arg1, (unsigned long)name,
7369 arg3, arg4, arg5));
7370 unlock_user(name, arg2, 16);
7371 break;
7372 }
7373 case PR_SET_NAME:
7374 {
7375 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7376 if (!name) {
7377 goto efault;
7378 }
7379 ret = get_errno(prctl(arg1, (unsigned long)name,
7380 arg3, arg4, arg5));
7381 unlock_user(name, arg2, 0);
7382 break;
7383 }
7384 #endif
7385 default:
7386 /* Most prctl options have no pointer arguments */
7387 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7388 break;
7389 }
7390 break;
7391 #ifdef TARGET_NR_arch_prctl
7392 case TARGET_NR_arch_prctl:
7393 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7394 ret = do_arch_prctl(cpu_env, arg1, arg2);
7395 break;
7396 #else
7397 goto unimplemented;
7398 #endif
7399 #endif
7400 #ifdef TARGET_NR_pread
7401 case TARGET_NR_pread:
7402 if (regpairs_aligned(cpu_env))
7403 arg4 = arg5;
7404 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7405 goto efault;
7406 ret = get_errno(pread(arg1, p, arg3, arg4));
7407 unlock_user(p, arg2, ret);
7408 break;
7409 case TARGET_NR_pwrite:
7410 if (regpairs_aligned(cpu_env))
7411 arg4 = arg5;
7412 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7413 goto efault;
7414 ret = get_errno(pwrite(arg1, p, arg3, arg4));
7415 unlock_user(p, arg2, 0);
7416 break;
7417 #endif
7418 #ifdef TARGET_NR_pread64
7419 case TARGET_NR_pread64:
7420 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7421 goto efault;
7422 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7423 unlock_user(p, arg2, ret);
7424 break;
7425 case TARGET_NR_pwrite64:
7426 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7427 goto efault;
7428 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7429 unlock_user(p, arg2, 0);
7430 break;
7431 #endif
7432 case TARGET_NR_getcwd:
7433 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7434 goto efault;
7435 ret = get_errno(sys_getcwd1(p, arg2));
7436 unlock_user(p, arg1, ret);
7437 break;
7438 case TARGET_NR_capget:
7439 goto unimplemented;
7440 case TARGET_NR_capset:
7441 goto unimplemented;
7442 case TARGET_NR_sigaltstack:
7443 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7444 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7445 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7446 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7447 break;
7448 #else
7449 goto unimplemented;
7450 #endif
7451 case TARGET_NR_sendfile:
7452 goto unimplemented;
7453 #ifdef TARGET_NR_getpmsg
7454 case TARGET_NR_getpmsg:
7455 goto unimplemented;
7456 #endif
7457 #ifdef TARGET_NR_putpmsg
7458 case TARGET_NR_putpmsg:
7459 goto unimplemented;
7460 #endif
7461 #ifdef TARGET_NR_vfork
7462 case TARGET_NR_vfork:
7463 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7464 0, 0, 0, 0));
7465 break;
7466 #endif
7467 #ifdef TARGET_NR_ugetrlimit
7468 case TARGET_NR_ugetrlimit:
7469 {
7470 struct rlimit rlim;
7471 int resource = target_to_host_resource(arg1);
7472 ret = get_errno(getrlimit(resource, &rlim));
7473 if (!is_error(ret)) {
7474 struct target_rlimit *target_rlim;
7475 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7476 goto efault;
7477 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7478 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7479 unlock_user_struct(target_rlim, arg2, 1);
7480 }
7481 break;
7482 }
7483 #endif
7484 #ifdef TARGET_NR_truncate64
7485 case TARGET_NR_truncate64:
7486 if (!(p = lock_user_string(arg1)))
7487 goto efault;
7488 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7489 unlock_user(p, arg1, 0);
7490 break;
7491 #endif
7492 #ifdef TARGET_NR_ftruncate64
7493 case TARGET_NR_ftruncate64:
7494 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7495 break;
7496 #endif
7497 #ifdef TARGET_NR_stat64
7498 case TARGET_NR_stat64:
7499 if (!(p = lock_user_string(arg1)))
7500 goto efault;
7501 ret = get_errno(stat(path(p), &st));
7502 unlock_user(p, arg1, 0);
7503 if (!is_error(ret))
7504 ret = host_to_target_stat64(cpu_env, arg2, &st);
7505 break;
7506 #endif
7507 #ifdef TARGET_NR_lstat64
7508 case TARGET_NR_lstat64:
7509 if (!(p = lock_user_string(arg1)))
7510 goto efault;
7511 ret = get_errno(lstat(path(p), &st));
7512 unlock_user(p, arg1, 0);
7513 if (!is_error(ret))
7514 ret = host_to_target_stat64(cpu_env, arg2, &st);
7515 break;
7516 #endif
7517 #ifdef TARGET_NR_fstat64
7518 case TARGET_NR_fstat64:
7519 ret = get_errno(fstat(arg1, &st));
7520 if (!is_error(ret))
7521 ret = host_to_target_stat64(cpu_env, arg2, &st);
7522 break;
7523 #endif
7524 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7525 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7526 #ifdef TARGET_NR_fstatat64
7527 case TARGET_NR_fstatat64:
7528 #endif
7529 #ifdef TARGET_NR_newfstatat
7530 case TARGET_NR_newfstatat:
7531 #endif
7532 if (!(p = lock_user_string(arg2)))
7533 goto efault;
7534 #ifdef __NR_fstatat64
7535 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7536 #else
7537 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7538 #endif
7539 if (!is_error(ret))
7540 ret = host_to_target_stat64(cpu_env, arg3, &st);
7541 break;
7542 #endif
7543 case TARGET_NR_lchown:
7544 if (!(p = lock_user_string(arg1)))
7545 goto efault;
7546 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7547 unlock_user(p, arg1, 0);
7548 break;
7549 #ifdef TARGET_NR_getuid
7550 case TARGET_NR_getuid:
7551 ret = get_errno(high2lowuid(getuid()));
7552 break;
7553 #endif
7554 #ifdef TARGET_NR_getgid
7555 case TARGET_NR_getgid:
7556 ret = get_errno(high2lowgid(getgid()));
7557 break;
7558 #endif
7559 #ifdef TARGET_NR_geteuid
7560 case TARGET_NR_geteuid:
7561 ret = get_errno(high2lowuid(geteuid()));
7562 break;
7563 #endif
7564 #ifdef TARGET_NR_getegid
7565 case TARGET_NR_getegid:
7566 ret = get_errno(high2lowgid(getegid()));
7567 break;
7568 #endif
7569 case TARGET_NR_setreuid:
7570 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7571 break;
7572 case TARGET_NR_setregid:
7573 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7574 break;
7575 case TARGET_NR_getgroups:
7576 {
7577 int gidsetsize = arg1;
7578 target_id *target_grouplist;
7579 gid_t *grouplist;
7580 int i;
7581
7582 grouplist = alloca(gidsetsize * sizeof(gid_t));
7583 ret = get_errno(getgroups(gidsetsize, grouplist));
7584 if (gidsetsize == 0)
7585 break;
7586 if (!is_error(ret)) {
7587 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7588 if (!target_grouplist)
7589 goto efault;
7590 for(i = 0;i < ret; i++)
7591 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7592 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7593 }
7594 }
7595 break;
7596 case TARGET_NR_setgroups:
7597 {
7598 int gidsetsize = arg1;
7599 target_id *target_grouplist;
7600 gid_t *grouplist;
7601 int i;
7602
7603 grouplist = alloca(gidsetsize * sizeof(gid_t));
7604 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7605 if (!target_grouplist) {
7606 ret = -TARGET_EFAULT;
7607 goto fail;
7608 }
7609 for(i = 0;i < gidsetsize; i++)
7610 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7611 unlock_user(target_grouplist, arg2, 0);
7612 ret = get_errno(setgroups(gidsetsize, grouplist));
7613 }
7614 break;
7615 case TARGET_NR_fchown:
7616 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7617 break;
7618 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7619 case TARGET_NR_fchownat:
7620 if (!(p = lock_user_string(arg2)))
7621 goto efault;
7622 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7623 unlock_user(p, arg2, 0);
7624 break;
7625 #endif
7626 #ifdef TARGET_NR_setresuid
7627 case TARGET_NR_setresuid:
7628 ret = get_errno(setresuid(low2highuid(arg1),
7629 low2highuid(arg2),
7630 low2highuid(arg3)));
7631 break;
7632 #endif
7633 #ifdef TARGET_NR_getresuid
7634 case TARGET_NR_getresuid:
7635 {
7636 uid_t ruid, euid, suid;
7637 ret = get_errno(getresuid(&ruid, &euid, &suid));
7638 if (!is_error(ret)) {
7639 if (put_user_u16(high2lowuid(ruid), arg1)
7640 || put_user_u16(high2lowuid(euid), arg2)
7641 || put_user_u16(high2lowuid(suid), arg3))
7642 goto efault;
7643 }
7644 }
7645 break;
7646 #endif
7647 #ifdef TARGET_NR_getresgid
7648 case TARGET_NR_setresgid:
7649 ret = get_errno(setresgid(low2highgid(arg1),
7650 low2highgid(arg2),
7651 low2highgid(arg3)));
7652 break;
7653 #endif
7654 #ifdef TARGET_NR_getresgid
7655 case TARGET_NR_getresgid:
7656 {
7657 gid_t rgid, egid, sgid;
7658 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7659 if (!is_error(ret)) {
7660 if (put_user_u16(high2lowgid(rgid), arg1)
7661 || put_user_u16(high2lowgid(egid), arg2)
7662 || put_user_u16(high2lowgid(sgid), arg3))
7663 goto efault;
7664 }
7665 }
7666 break;
7667 #endif
7668 case TARGET_NR_chown:
7669 if (!(p = lock_user_string(arg1)))
7670 goto efault;
7671 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7672 unlock_user(p, arg1, 0);
7673 break;
7674 case TARGET_NR_setuid:
7675 ret = get_errno(setuid(low2highuid(arg1)));
7676 break;
7677 case TARGET_NR_setgid:
7678 ret = get_errno(setgid(low2highgid(arg1)));
7679 break;
7680 case TARGET_NR_setfsuid:
7681 ret = get_errno(setfsuid(arg1));
7682 break;
7683 case TARGET_NR_setfsgid:
7684 ret = get_errno(setfsgid(arg1));
7685 break;
7686
7687 #ifdef TARGET_NR_lchown32
7688 case TARGET_NR_lchown32:
7689 if (!(p = lock_user_string(arg1)))
7690 goto efault;
7691 ret = get_errno(lchown(p, arg2, arg3));
7692 unlock_user(p, arg1, 0);
7693 break;
7694 #endif
7695 #ifdef TARGET_NR_getuid32
7696 case TARGET_NR_getuid32:
7697 ret = get_errno(getuid());
7698 break;
7699 #endif
7700
7701 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7702 /* Alpha specific */
7703 case TARGET_NR_getxuid:
7704 {
7705 uid_t euid;
7706            euid = geteuid();
7707            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
7708 }
7709 ret = get_errno(getuid());
7710 break;
7711 #endif
7712 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7713 /* Alpha specific */
7714 case TARGET_NR_getxgid:
7715 {
7716 gid_t egid;
7717 egid=getegid();
7718 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7719 }
7720 ret = get_errno(getgid());
7721 break;
7722 #endif
7723 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7724 /* Alpha specific */
7725 case TARGET_NR_osf_getsysinfo:
7726 ret = -TARGET_EOPNOTSUPP;
7727 switch (arg1) {
7728 case TARGET_GSI_IEEE_FP_CONTROL:
7729 {
7730 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7731
7732 /* Copied from linux ieee_fpcr_to_swcr. */
7733 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7734 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7735 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7736 | SWCR_TRAP_ENABLE_DZE
7737 | SWCR_TRAP_ENABLE_OVF);
7738 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7739 | SWCR_TRAP_ENABLE_INE);
7740 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7741 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7742
7743 if (put_user_u64 (swcr, arg2))
7744 goto efault;
7745 ret = 0;
7746 }
7747 break;
7748
7749 /* case GSI_IEEE_STATE_AT_SIGNAL:
7750 -- Not implemented in linux kernel.
7751 case GSI_UACPROC:
7752 -- Retrieves current unaligned access state; not much used.
7753 case GSI_PROC_TYPE:
7754 -- Retrieves implver information; surely not used.
7755 case GSI_GET_HWRPB:
7756 -- Grabs a copy of the HWRPB; surely not used.
7757 */
7758 }
7759 break;
7760 #endif
7761 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7762 /* Alpha specific */
7763 case TARGET_NR_osf_setsysinfo:
7764 ret = -TARGET_EOPNOTSUPP;
7765 switch (arg1) {
7766 case TARGET_SSI_IEEE_FP_CONTROL:
7767 {
7768 uint64_t swcr, fpcr, orig_fpcr;
7769
7770 if (get_user_u64 (swcr, arg2)) {
7771 goto efault;
7772 }
7773 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7774 fpcr = orig_fpcr & FPCR_DYN_MASK;
7775
7776 /* Copied from linux ieee_swcr_to_fpcr. */
7777 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7778 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7779 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7780 | SWCR_TRAP_ENABLE_DZE
7781 | SWCR_TRAP_ENABLE_OVF)) << 48;
7782 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7783 | SWCR_TRAP_ENABLE_INE)) << 57;
7784 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7785 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7786
7787 cpu_alpha_store_fpcr(cpu_env, fpcr);
7788 ret = 0;
7789 }
7790 break;
7791
7792 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7793 {
7794 uint64_t exc, fpcr, orig_fpcr;
7795 int si_code;
7796
7797 if (get_user_u64(exc, arg2)) {
7798 goto efault;
7799 }
7800
7801 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7802
7803 /* We only add to the exception status here. */
7804 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7805
7806 cpu_alpha_store_fpcr(cpu_env, fpcr);
7807 ret = 0;
7808
7809 /* Old exceptions are not signaled. */
7810 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7811
7812 /* If any exceptions set by this call are unmasked,
7813 send a signal. */
7814 si_code = 0;
7815 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7816 si_code = TARGET_FPE_FLTRES;
7817 }
7818 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7819 si_code = TARGET_FPE_FLTUND;
7820 }
7821 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7822 si_code = TARGET_FPE_FLTOVF;
7823 }
7824 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7825 si_code = TARGET_FPE_FLTDIV;
7826 }
7827 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7828 si_code = TARGET_FPE_FLTINV;
7829 }
7830 if (si_code != 0) {
7831 target_siginfo_t info;
7832 info.si_signo = SIGFPE;
7833 info.si_errno = 0;
7834 info.si_code = si_code;
7835 info._sifields._sigfault._addr
7836 = ((CPUArchState *)cpu_env)->pc;
7837 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7838 }
7839 }
7840 break;
7841
7842 /* case SSI_NVPAIRS:
7843 -- Used with SSIN_UACPROC to enable unaligned accesses.
7844 case SSI_IEEE_STATE_AT_SIGNAL:
7845 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7846 -- Not implemented in linux kernel
7847 */
7848 }
7849 break;
7850 #endif
7851 #ifdef TARGET_NR_osf_sigprocmask
7852 /* Alpha specific. */
7853 case TARGET_NR_osf_sigprocmask:
7854 {
7855 abi_ulong mask;
7856 int how;
7857 sigset_t set, oldset;
7858
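/* The OSF/1 sigprocmask returns the previous signal mask as the
syscall result rather than storing it through a user pointer. */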
7859 switch(arg1) {
7860 case TARGET_SIG_BLOCK:
7861 how = SIG_BLOCK;
7862 break;
7863 case TARGET_SIG_UNBLOCK:
7864 how = SIG_UNBLOCK;
7865 break;
7866 case TARGET_SIG_SETMASK:
7867 how = SIG_SETMASK;
7868 break;
7869 default:
7870 ret = -TARGET_EINVAL;
7871 goto fail;
7872 }
7873 mask = arg2;
7874 target_to_host_old_sigset(&set, &mask);
7875 sigprocmask(how, &set, &oldset);
7876 host_to_target_old_sigset(&mask, &oldset);
7877 ret = mask;
7878 }
7879 break;
7880 #endif
7881
7882 #ifdef TARGET_NR_getgid32
7883 case TARGET_NR_getgid32:
7884 ret = get_errno(getgid());
7885 break;
7886 #endif
7887 #ifdef TARGET_NR_geteuid32
7888 case TARGET_NR_geteuid32:
7889 ret = get_errno(geteuid());
7890 break;
7891 #endif
7892 #ifdef TARGET_NR_getegid32
7893 case TARGET_NR_getegid32:
7894 ret = get_errno(getegid());
7895 break;
7896 #endif
7897 #ifdef TARGET_NR_setreuid32
7898 case TARGET_NR_setreuid32:
7899 ret = get_errno(setreuid(arg1, arg2));
7900 break;
7901 #endif
7902 #ifdef TARGET_NR_setregid32
7903 case TARGET_NR_setregid32:
7904 ret = get_errno(setregid(arg1, arg2));
7905 break;
7906 #endif
7907 #ifdef TARGET_NR_getgroups32
7908 case TARGET_NR_getgroups32:
7909 {
7910 int gidsetsize = arg1;
7911 uint32_t *target_grouplist;
7912 gid_t *grouplist;
7913 int i;
7914
7915 grouplist = alloca(gidsetsize * sizeof(gid_t));
7916 ret = get_errno(getgroups(gidsetsize, grouplist));
7917 if (gidsetsize == 0)
7918 break;
7919 if (!is_error(ret)) {
7920 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7921 if (!target_grouplist) {
7922 ret = -TARGET_EFAULT;
7923 goto fail;
7924 }
7925 for(i = 0;i < ret; i++)
7926 target_grouplist[i] = tswap32(grouplist[i]);
7927 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7928 }
7929 }
7930 break;
7931 #endif
7932 #ifdef TARGET_NR_setgroups32
7933 case TARGET_NR_setgroups32:
7934 {
7935 int gidsetsize = arg1;
7936 uint32_t *target_grouplist;
7937 gid_t *grouplist;
7938 int i;
7939
7940 grouplist = alloca(gidsetsize * sizeof(gid_t));
7941 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7942 if (!target_grouplist) {
7943 ret = -TARGET_EFAULT;
7944 goto fail;
7945 }
7946 for(i = 0;i < gidsetsize; i++)
7947 grouplist[i] = tswap32(target_grouplist[i]);
7948 unlock_user(target_grouplist, arg2, 0);
7949 ret = get_errno(setgroups(gidsetsize, grouplist));
7950 }
7951 break;
7952 #endif
7953 #ifdef TARGET_NR_fchown32
7954 case TARGET_NR_fchown32:
7955 ret = get_errno(fchown(arg1, arg2, arg3));
7956 break;
7957 #endif
7958 #ifdef TARGET_NR_setresuid32
7959 case TARGET_NR_setresuid32:
7960 ret = get_errno(setresuid(arg1, arg2, arg3));
7961 break;
7962 #endif
7963 #ifdef TARGET_NR_getresuid32
7964 case TARGET_NR_getresuid32:
7965 {
7966 uid_t ruid, euid, suid;
7967 ret = get_errno(getresuid(&ruid, &euid, &suid));
7968 if (!is_error(ret)) {
7969 if (put_user_u32(ruid, arg1)
7970 || put_user_u32(euid, arg2)
7971 || put_user_u32(suid, arg3))
7972 goto efault;
7973 }
7974 }
7975 break;
7976 #endif
7977 #ifdef TARGET_NR_setresgid32
7978 case TARGET_NR_setresgid32:
7979 ret = get_errno(setresgid(arg1, arg2, arg3));
7980 break;
7981 #endif
7982 #ifdef TARGET_NR_getresgid32
7983 case TARGET_NR_getresgid32:
7984 {
7985 gid_t rgid, egid, sgid;
7986 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7987 if (!is_error(ret)) {
7988 if (put_user_u32(rgid, arg1)
7989 || put_user_u32(egid, arg2)
7990 || put_user_u32(sgid, arg3))
7991 goto efault;
7992 }
7993 }
7994 break;
7995 #endif
7996 #ifdef TARGET_NR_chown32
7997 case TARGET_NR_chown32:
7998 if (!(p = lock_user_string(arg1)))
7999 goto efault;
8000 ret = get_errno(chown(p, arg2, arg3));
8001 unlock_user(p, arg1, 0);
8002 break;
8003 #endif
8004 #ifdef TARGET_NR_setuid32
8005 case TARGET_NR_setuid32:
8006 ret = get_errno(setuid(arg1));
8007 break;
8008 #endif
8009 #ifdef TARGET_NR_setgid32
8010 case TARGET_NR_setgid32:
8011 ret = get_errno(setgid(arg1));
8012 break;
8013 #endif
8014 #ifdef TARGET_NR_setfsuid32
8015 case TARGET_NR_setfsuid32:
8016 ret = get_errno(setfsuid(arg1));
8017 break;
8018 #endif
8019 #ifdef TARGET_NR_setfsgid32
8020 case TARGET_NR_setfsgid32:
8021 ret = get_errno(setfsgid(arg1));
8022 break;
8023 #endif
8024
8025 case TARGET_NR_pivot_root:
8026 goto unimplemented;
8027 #ifdef TARGET_NR_mincore
8028 case TARGET_NR_mincore:
8029 {
8030 void *a;
8031 ret = -TARGET_EFAULT;
8032 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8033 goto efault;
8034 if (!(p = lock_user_string(arg3)))
8035 goto mincore_fail;
8036 ret = get_errno(mincore(a, arg2, p));
8037 unlock_user(p, arg3, ret);
8038 mincore_fail:
8039 unlock_user(a, arg1, 0);
8040 }
8041 break;
8042 #endif
8043 #ifdef TARGET_NR_arm_fadvise64_64
8044 case TARGET_NR_arm_fadvise64_64:
8045 {
8046 /*
8047 * arm_fadvise64_64 looks like fadvise64_64 but
8048 * with different argument order
8049 */
8050 abi_long temp;
8051 temp = arg3;
8052 arg3 = arg4;
8053 arg4 = temp;
8054 }
8055 #endif
8056 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8057 #ifdef TARGET_NR_fadvise64_64
8058 case TARGET_NR_fadvise64_64:
8059 #endif
8060 #ifdef TARGET_NR_fadvise64
8061 case TARGET_NR_fadvise64:
8062 #endif
8063 #ifdef TARGET_S390X
8064 switch (arg4) {
8065 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8066 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8067 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8068 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8069 default: break;
8070 }
8071 #endif
8072 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
8073 break;
8074 #endif
8075 #ifdef TARGET_NR_madvise
8076 case TARGET_NR_madvise:
8077 /* A straight passthrough may not be safe because qemu sometimes
8078 turns private file-backed mappings into anonymous mappings.
8079 This will break MADV_DONTNEED.
8080 This is a hint, so ignoring and returning success is ok. */
8081 ret = get_errno(0);
8082 break;
8083 #endif
8084 #if TARGET_ABI_BITS == 32
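/* On 32-bit ABIs fcntl64 uses struct flock64, whose layout differs between
target and host (and again for ARM EABI), so locks are converted field by field. */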
8085 case TARGET_NR_fcntl64:
8086 {
8087 int cmd;
8088 struct flock64 fl;
8089 struct target_flock64 *target_fl;
8090 #ifdef TARGET_ARM
8091 struct target_eabi_flock64 *target_efl;
8092 #endif
8093
8094 cmd = target_to_host_fcntl_cmd(arg2);
8095 if (cmd == -TARGET_EINVAL) {
8096 ret = cmd;
8097 break;
8098 }
8099
8100 switch(arg2) {
8101 case TARGET_F_GETLK64:
8102 #ifdef TARGET_ARM
8103 if (((CPUARMState *)cpu_env)->eabi) {
8104 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8105 goto efault;
8106 fl.l_type = tswap16(target_efl->l_type);
8107 fl.l_whence = tswap16(target_efl->l_whence);
8108 fl.l_start = tswap64(target_efl->l_start);
8109 fl.l_len = tswap64(target_efl->l_len);
8110 fl.l_pid = tswap32(target_efl->l_pid);
8111 unlock_user_struct(target_efl, arg3, 0);
8112 } else
8113 #endif
8114 {
8115 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8116 goto efault;
8117 fl.l_type = tswap16(target_fl->l_type);
8118 fl.l_whence = tswap16(target_fl->l_whence);
8119 fl.l_start = tswap64(target_fl->l_start);
8120 fl.l_len = tswap64(target_fl->l_len);
8121 fl.l_pid = tswap32(target_fl->l_pid);
8122 unlock_user_struct(target_fl, arg3, 0);
8123 }
8124 ret = get_errno(fcntl(arg1, cmd, &fl));
8125 if (ret == 0) {
8126 #ifdef TARGET_ARM
8127 if (((CPUARMState *)cpu_env)->eabi) {
8128 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8129 goto efault;
8130 target_efl->l_type = tswap16(fl.l_type);
8131 target_efl->l_whence = tswap16(fl.l_whence);
8132 target_efl->l_start = tswap64(fl.l_start);
8133 target_efl->l_len = tswap64(fl.l_len);
8134 target_efl->l_pid = tswap32(fl.l_pid);
8135 unlock_user_struct(target_efl, arg3, 1);
8136 } else
8137 #endif
8138 {
8139 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8140 goto efault;
8141 target_fl->l_type = tswap16(fl.l_type);
8142 target_fl->l_whence = tswap16(fl.l_whence);
8143 target_fl->l_start = tswap64(fl.l_start);
8144 target_fl->l_len = tswap64(fl.l_len);
8145 target_fl->l_pid = tswap32(fl.l_pid);
8146 unlock_user_struct(target_fl, arg3, 1);
8147 }
8148 }
8149 break;
8150
8151 case TARGET_F_SETLK64:
8152 case TARGET_F_SETLKW64:
8153 #ifdef TARGET_ARM
8154 if (((CPUARMState *)cpu_env)->eabi) {
8155 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8156 goto efault;
8157 fl.l_type = tswap16(target_efl->l_type);
8158 fl.l_whence = tswap16(target_efl->l_whence);
8159 fl.l_start = tswap64(target_efl->l_start);
8160 fl.l_len = tswap64(target_efl->l_len);
8161 fl.l_pid = tswap32(target_efl->l_pid);
8162 unlock_user_struct(target_efl, arg3, 0);
8163 } else
8164 #endif
8165 {
8166 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8167 goto efault;
8168 fl.l_type = tswap16(target_fl->l_type);
8169 fl.l_whence = tswap16(target_fl->l_whence);
8170 fl.l_start = tswap64(target_fl->l_start);
8171 fl.l_len = tswap64(target_fl->l_len);
8172 fl.l_pid = tswap32(target_fl->l_pid);
8173 unlock_user_struct(target_fl, arg3, 0);
8174 }
8175 ret = get_errno(fcntl(arg1, cmd, &fl));
8176 break;
8177 default:
8178 ret = do_fcntl(arg1, arg2, arg3);
8179 break;
8180 }
8181 break;
8182 }
8183 #endif
8184 #ifdef TARGET_NR_cacheflush
8185 case TARGET_NR_cacheflush:
8186 /* self-modifying code is handled automatically, so nothing needed */
8187 ret = 0;
8188 break;
8189 #endif
8190 #ifdef TARGET_NR_security
8191 case TARGET_NR_security:
8192 goto unimplemented;
8193 #endif
8194 #ifdef TARGET_NR_getpagesize
8195 case TARGET_NR_getpagesize:
8196 ret = TARGET_PAGE_SIZE;
8197 break;
8198 #endif
8199 case TARGET_NR_gettid:
8200 ret = get_errno(gettid());
8201 break;
8202 #ifdef TARGET_NR_readahead
8203 case TARGET_NR_readahead:
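/* On 32-bit ABIs the 64-bit offset arrives split across two registers, and
some ABIs require that register pair to be aligned, hence the shuffling below. */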
8204 #if TARGET_ABI_BITS == 32
8205 if (regpairs_aligned(cpu_env)) {
8206 arg2 = arg3;
8207 arg3 = arg4;
8208 arg4 = arg5;
8209 }
8210 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8211 #else
8212 ret = get_errno(readahead(arg1, arg2, arg3));
8213 #endif
8214 break;
8215 #endif
8216 #ifdef CONFIG_ATTR
8217 #ifdef TARGET_NR_setxattr
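/* The xattr calls only move opaque name/value byte buffers, so no structure
conversion is needed; zero guest pointers are passed through as NULL so the
size-probing form of each call still works. */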
8218 case TARGET_NR_listxattr:
8219 case TARGET_NR_llistxattr:
8220 {
8221 void *p, *b = 0;
8222 if (arg2) {
8223 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8224 if (!b) {
8225 ret = -TARGET_EFAULT;
8226 break;
8227 }
8228 }
8229 p = lock_user_string(arg1);
8230 if (p) {
8231 if (num == TARGET_NR_listxattr) {
8232 ret = get_errno(listxattr(p, b, arg3));
8233 } else {
8234 ret = get_errno(llistxattr(p, b, arg3));
8235 }
8236 } else {
8237 ret = -TARGET_EFAULT;
8238 }
8239 unlock_user(p, arg1, 0);
8240 unlock_user(b, arg2, arg3);
8241 break;
8242 }
8243 case TARGET_NR_flistxattr:
8244 {
8245 void *b = 0;
8246 if (arg2) {
8247 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8248 if (!b) {
8249 ret = -TARGET_EFAULT;
8250 break;
8251 }
8252 }
8253 ret = get_errno(flistxattr(arg1, b, arg3));
8254 unlock_user(b, arg2, arg3);
8255 break;
8256 }
8257 case TARGET_NR_setxattr:
8258 case TARGET_NR_lsetxattr:
8259 {
8260 void *p, *n, *v = 0;
8261 if (arg3) {
8262 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8263 if (!v) {
8264 ret = -TARGET_EFAULT;
8265 break;
8266 }
8267 }
8268 p = lock_user_string(arg1);
8269 n = lock_user_string(arg2);
8270 if (p && n) {
8271 if (num == TARGET_NR_setxattr) {
8272 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8273 } else {
8274 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8275 }
8276 } else {
8277 ret = -TARGET_EFAULT;
8278 }
8279 unlock_user(p, arg1, 0);
8280 unlock_user(n, arg2, 0);
8281 unlock_user(v, arg3, 0);
8282 }
8283 break;
8284 case TARGET_NR_fsetxattr:
8285 {
8286 void *n, *v = 0;
8287 if (arg3) {
8288 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8289 if (!v) {
8290 ret = -TARGET_EFAULT;
8291 break;
8292 }
8293 }
8294 n = lock_user_string(arg2);
8295 if (n) {
8296 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8297 } else {
8298 ret = -TARGET_EFAULT;
8299 }
8300 unlock_user(n, arg2, 0);
8301 unlock_user(v, arg3, 0);
8302 }
8303 break;
8304 case TARGET_NR_getxattr:
8305 case TARGET_NR_lgetxattr:
8306 {
8307 void *p, *n, *v = 0;
8308 if (arg3) {
8309 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8310 if (!v) {
8311 ret = -TARGET_EFAULT;
8312 break;
8313 }
8314 }
8315 p = lock_user_string(arg1);
8316 n = lock_user_string(arg2);
8317 if (p && n) {
8318 if (num == TARGET_NR_getxattr) {
8319 ret = get_errno(getxattr(p, n, v, arg4));
8320 } else {
8321 ret = get_errno(lgetxattr(p, n, v, arg4));
8322 }
8323 } else {
8324 ret = -TARGET_EFAULT;
8325 }
8326 unlock_user(p, arg1, 0);
8327 unlock_user(n, arg2, 0);
8328 unlock_user(v, arg3, arg4);
8329 }
8330 break;
8331 case TARGET_NR_fgetxattr:
8332 {
8333 void *n, *v = 0;
8334 if (arg3) {
8335 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8336 if (!v) {
8337 ret = -TARGET_EFAULT;
8338 break;
8339 }
8340 }
8341 n = lock_user_string(arg2);
8342 if (n) {
8343 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8344 } else {
8345 ret = -TARGET_EFAULT;
8346 }
8347 unlock_user(n, arg2, 0);
8348 unlock_user(v, arg3, arg4);
8349 }
8350 break;
8351 case TARGET_NR_removexattr:
8352 case TARGET_NR_lremovexattr:
8353 {
8354 void *p, *n;
8355 p = lock_user_string(arg1);
8356 n = lock_user_string(arg2);
8357 if (p && n) {
8358 if (num == TARGET_NR_removexattr) {
8359 ret = get_errno(removexattr(p, n));
8360 } else {
8361 ret = get_errno(lremovexattr(p, n));
8362 }
8363 } else {
8364 ret = -TARGET_EFAULT;
8365 }
8366 unlock_user(p, arg1, 0);
8367 unlock_user(n, arg2, 0);
8368 }
8369 break;
8370 case TARGET_NR_fremovexattr:
8371 {
8372 void *n;
8373 n = lock_user_string(arg2);
8374 if (n) {
8375 ret = get_errno(fremovexattr(arg1, n));
8376 } else {
8377 ret = -TARGET_EFAULT;
8378 }
8379 unlock_user(n, arg2, 0);
8380 }
8381 break;
8382 #endif
8383 #endif /* CONFIG_ATTR */
8384 #ifdef TARGET_NR_set_thread_area
8385 case TARGET_NR_set_thread_area:
8386 #if defined(TARGET_MIPS)
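/* MIPS just records the TLS pointer in the CPU state; the guest reads it
back through the emulated rdhwr instruction. */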
8387 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8388 ret = 0;
8389 break;
8390 #elif defined(TARGET_CRIS)
8391 if (arg1 & 0xff)
8392 ret = -TARGET_EINVAL;
8393 else {
8394 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8395 ret = 0;
8396 }
8397 break;
8398 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8399 ret = do_set_thread_area(cpu_env, arg1);
8400 break;
8401 #else
8402 goto unimplemented_nowarn;
8403 #endif
8404 #endif
8405 #ifdef TARGET_NR_get_thread_area
8406 case TARGET_NR_get_thread_area:
8407 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8408 ret = do_get_thread_area(cpu_env, arg1);
break;
8409 #else
8410 goto unimplemented_nowarn;
8411 #endif
8412 #endif
8413 #ifdef TARGET_NR_getdomainname
8414 case TARGET_NR_getdomainname:
8415 goto unimplemented_nowarn;
8416 #endif
8417
8418 #ifdef TARGET_NR_clock_gettime
8419 case TARGET_NR_clock_gettime:
8420 {
8421 struct timespec ts;
8422 ret = get_errno(clock_gettime(arg1, &ts));
8423 if (!is_error(ret)) {
8424 host_to_target_timespec(arg2, &ts);
8425 }
8426 break;
8427 }
8428 #endif
8429 #ifdef TARGET_NR_clock_getres
8430 case TARGET_NR_clock_getres:
8431 {
8432 struct timespec ts;
8433 ret = get_errno(clock_getres(arg1, &ts));
8434 if (!is_error(ret)) {
8435 host_to_target_timespec(arg2, &ts);
8436 }
8437 break;
8438 }
8439 #endif
8440 #ifdef TARGET_NR_clock_nanosleep
8441 case TARGET_NR_clock_nanosleep:
8442 {
8443 struct timespec ts;
8444 target_to_host_timespec(&ts, arg3);
8445 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8446 if (arg4)
8447 host_to_target_timespec(arg4, &ts);
8448 break;
8449 }
8450 #endif
8451
8452 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8453 case TARGET_NR_set_tid_address:
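/* Pass the translated guest address straight to the host kernel; it only
touches the clear-child-tid word there when the thread exits. */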
8454 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8455 break;
8456 #endif
8457
8458 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8459 case TARGET_NR_tkill:
8460 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8461 break;
8462 #endif
8463
8464 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8465 case TARGET_NR_tgkill:
8466 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8467 target_to_host_signal(arg3)));
8468 break;
8469 #endif
8470
8471 #ifdef TARGET_NR_set_robust_list
8472 case TARGET_NR_set_robust_list:
8473 goto unimplemented_nowarn;
8474 #endif
8475
8476 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8477 case TARGET_NR_utimensat:
8478 {
8479 struct timespec *tsp, ts[2];
8480 if (!arg3) {
8481 tsp = NULL;
8482 } else {
8483 target_to_host_timespec(ts, arg3);
8484 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8485 tsp = ts;
8486 }
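/* A NULL path is valid: utimensat then acts on the file descriptor itself,
which is how futimens() is implemented. */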
8487 if (!arg2)
8488 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8489 else {
8490 if (!(p = lock_user_string(arg2))) {
8491 ret = -TARGET_EFAULT;
8492 goto fail;
8493 }
8494 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8495 unlock_user(p, arg2, 0);
8496 }
8497 }
8498 break;
8499 #endif
8500 #if defined(CONFIG_USE_NPTL)
8501 case TARGET_NR_futex:
8502 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8503 break;
8504 #endif
8505 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8506 case TARGET_NR_inotify_init:
8507 ret = get_errno(sys_inotify_init());
8508 break;
8509 #endif
8510 #ifdef CONFIG_INOTIFY1
8511 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8512 case TARGET_NR_inotify_init1:
8513 ret = get_errno(sys_inotify_init1(arg1));
8514 break;
8515 #endif
8516 #endif
8517 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8518 case TARGET_NR_inotify_add_watch:
8519 p = lock_user_string(arg2);
8520 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8521 unlock_user(p, arg2, 0);
8522 break;
8523 #endif
8524 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8525 case TARGET_NR_inotify_rm_watch:
8526 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8527 break;
8528 #endif
8529
8530 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8531 case TARGET_NR_mq_open:
8532 {
8533 struct mq_attr posix_mq_attr;
8534
8535 p = lock_user_string(arg1 - 1);
8536 if (arg4 != 0)
8537 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8538 ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL));
8539 unlock_user (p, arg1, 0);
8540 }
8541 break;
8542
8543 case TARGET_NR_mq_unlink:
8544 p = lock_user_string(arg1 - 1);
8545 ret = get_errno(mq_unlink(p));
8546 unlock_user (p, arg1, 0);
8547 break;
8548
8549 case TARGET_NR_mq_timedsend:
8550 {
8551 struct timespec ts;
8552
8553 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8554 if (arg5 != 0) {
8555 target_to_host_timespec(&ts, arg5);
8556 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8557 host_to_target_timespec(arg5, &ts);
8558 }
8559 else
8560 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8561 unlock_user (p, arg2, arg3);
8562 }
8563 break;
8564
8565 case TARGET_NR_mq_timedreceive:
8566 {
8567 struct timespec ts;
8568 unsigned int prio;
8569
8570 p = lock_user (VERIFY_WRITE, arg2, arg3, 1);
8571 if (arg5 != 0) {
8572 target_to_host_timespec(&ts, arg5);
8573 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8574 host_to_target_timespec(arg5, &ts);
8575 }
8576 else
8577 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8578 unlock_user (p, arg2, arg3);
8579 if (arg4 != 0)
8580 put_user_u32(prio, arg4);
8581 }
8582 break;
8583
8584 /* Not implemented for now... */
8585 /* case TARGET_NR_mq_notify: */
8586 /* break; */
8587
8588 case TARGET_NR_mq_getsetattr:
8589 {
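/* arg2 points at the new attributes (may be 0), arg3 receives the previous
attributes (may also be 0). */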
8590 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8591 ret = 0;
8592 if (arg3 != 0) {
8593 ret = mq_getattr(arg1, &posix_mq_attr_out);
8594 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8595 }
8596 if (arg2 != 0) {
8597 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8598 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8599 }
8600
8601 }
8602 break;
8603 #endif
8604
8605 #ifdef CONFIG_SPLICE
8606 #ifdef TARGET_NR_tee
8607 case TARGET_NR_tee:
8608 {
8609 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8610 }
8611 break;
8612 #endif
8613 #ifdef TARGET_NR_splice
8614 case TARGET_NR_splice:
8615 {
8616 loff_t loff_in, loff_out;
8617 loff_t *ploff_in = NULL, *ploff_out = NULL;
8618 if(arg2) {
8619 get_user_u64(loff_in, arg2);
8620 ploff_in = &loff_in;
8621 }
8622 if(arg4) {
8623 get_user_u64(loff_out, arg4);
8624 ploff_out = &loff_out;
8625 }
8626 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8627 }
8628 break;
8629 #endif
8630 #ifdef TARGET_NR_vmsplice
8631 case TARGET_NR_vmsplice:
8632 {
8633 int count = arg3;
8634 struct iovec *vec;
8635
8636 vec = alloca(count * sizeof(struct iovec));
8637 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
8638 goto efault;
8639 ret = get_errno(vmsplice(arg1, vec, count, arg4));
8640 unlock_iovec(vec, arg2, count, 0);
8641 }
8642 break;
8643 #endif
8644 #endif /* CONFIG_SPLICE */
8645 #ifdef CONFIG_EVENTFD
8646 #if defined(TARGET_NR_eventfd)
8647 case TARGET_NR_eventfd:
8648 ret = get_errno(eventfd(arg1, 0));
8649 break;
8650 #endif
8651 #if defined(TARGET_NR_eventfd2)
8652 case TARGET_NR_eventfd2:
8653 ret = get_errno(eventfd(arg1, arg2));
8654 break;
8655 #endif
8656 #endif /* CONFIG_EVENTFD */
8657 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8658 case TARGET_NR_fallocate:
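/* On 32-bit ABIs the 64-bit offset and length arrive as register pairs and
are recombined with target_offset64(). */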
8659 #if TARGET_ABI_BITS == 32
8660 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8661 target_offset64(arg5, arg6)));
8662 #else
8663 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8664 #endif
8665 break;
8666 #endif
8667 #if defined(CONFIG_SYNC_FILE_RANGE)
8668 #if defined(TARGET_NR_sync_file_range)
8669 case TARGET_NR_sync_file_range:
8670 #if TARGET_ABI_BITS == 32
8671 #if defined(TARGET_MIPS)
8672 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8673 target_offset64(arg5, arg6), arg7));
8674 #else
8675 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8676 target_offset64(arg4, arg5), arg6));
8677 #endif /* !TARGET_MIPS */
8678 #else
8679 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8680 #endif
8681 break;
8682 #endif
8683 #if defined(TARGET_NR_sync_file_range2)
8684 case TARGET_NR_sync_file_range2:
8685 /* This is like sync_file_range but the arguments are reordered */
8686 #if TARGET_ABI_BITS == 32
8687 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8688 target_offset64(arg5, arg6), arg2));
8689 #else
8690 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8691 #endif
8692 break;
8693 #endif
8694 #endif
8695 #if defined(CONFIG_EPOLL)
8696 #if defined(TARGET_NR_epoll_create)
8697 case TARGET_NR_epoll_create:
8698 ret = get_errno(epoll_create(arg1));
8699 break;
8700 #endif
8701 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8702 case TARGET_NR_epoll_create1:
8703 ret = get_errno(epoll_create1(arg1));
8704 break;
8705 #endif
8706 #if defined(TARGET_NR_epoll_ctl)
8707 case TARGET_NR_epoll_ctl:
8708 {
8709 struct epoll_event ep;
8710 struct epoll_event *epp = 0;
8711 if (arg4) {
8712 struct target_epoll_event *target_ep;
8713 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8714 goto efault;
8715 }
8716 ep.events = tswap32(target_ep->events);
8717 /* The epoll_data_t union is just opaque data to the kernel,
8718 * so we transfer all 64 bits across and need not worry what
8719 * actual data type it is.
8720 */
8721 ep.data.u64 = tswap64(target_ep->data.u64);
8722 unlock_user_struct(target_ep, arg4, 0);
8723 epp = &ep;
8724 }
8725 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8726 break;
8727 }
8728 #endif
8729
8730 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8731 #define IMPLEMENT_EPOLL_PWAIT
8732 #endif
8733 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8734 #if defined(TARGET_NR_epoll_wait)
8735 case TARGET_NR_epoll_wait:
8736 #endif
8737 #if defined(IMPLEMENT_EPOLL_PWAIT)
8738 case TARGET_NR_epoll_pwait:
8739 #endif
8740 {
8741 struct target_epoll_event *target_ep;
8742 struct epoll_event *ep;
8743 int epfd = arg1;
8744 int maxevents = arg3;
8745 int timeout = arg4;
8746
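/* Collect events into a host-side array, then convert each entry back into
the target's epoll_event layout before unlocking the guest buffer. */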
8747 target_ep = lock_user(VERIFY_WRITE, arg2,
8748 maxevents * sizeof(struct target_epoll_event), 1);
8749 if (!target_ep) {
8750 goto efault;
8751 }
8752
8753 ep = alloca(maxevents * sizeof(struct epoll_event));
8754
8755 switch (num) {
8756 #if defined(IMPLEMENT_EPOLL_PWAIT)
8757 case TARGET_NR_epoll_pwait:
8758 {
8759 target_sigset_t *target_set;
8760 sigset_t _set, *set = &_set;
8761
8762 if (arg5) {
8763 target_set = lock_user(VERIFY_READ, arg5,
8764 sizeof(target_sigset_t), 1);
8765 if (!target_set) {
8766 unlock_user(target_ep, arg2, 0);
8767 goto efault;
8768 }
8769 target_to_host_sigset(set, target_set);
8770 unlock_user(target_set, arg5, 0);
8771 } else {
8772 set = NULL;
8773 }
8774
8775 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8776 break;
8777 }
8778 #endif
8779 #if defined(TARGET_NR_epoll_wait)
8780 case TARGET_NR_epoll_wait:
8781 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8782 break;
8783 #endif
8784 default:
8785 ret = -TARGET_ENOSYS;
8786 }
8787 if (!is_error(ret)) {
8788 int i;
8789 for (i = 0; i < ret; i++) {
8790 target_ep[i].events = tswap32(ep[i].events);
8791 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8792 }
8793 }
8794 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8795 break;
8796 }
8797 #endif
8798 #endif
8799 #ifdef TARGET_NR_prlimit64
8800 case TARGET_NR_prlimit64:
8801 {
8802 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8803 struct target_rlimit64 *target_rnew, *target_rold;
8804 struct host_rlimit64 rnew, rold, *rnewp = 0;
8805 if (arg3) {
8806 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8807 goto efault;
8808 }
8809 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8810 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8811 unlock_user_struct(target_rnew, arg3, 0);
8812 rnewp = &rnew;
8813 }
8814
8815 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8816 if (!is_error(ret) && arg4) {
8817 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8818 goto efault;
8819 }
8820 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8821 target_rold->rlim_max = tswap64(rold.rlim_max);
8822 unlock_user_struct(target_rold, arg4, 1);
8823 }
8824 break;
8825 }
8826 #endif
8827 default:
8828 unimplemented:
8829 gemu_log("qemu: Unsupported syscall: %d\n", num);
8830 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8831 unimplemented_nowarn:
8832 #endif
8833 ret = -TARGET_ENOSYS;
8834 break;
8835 }
8836 fail:
8837 #ifdef DEBUG
8838 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8839 #endif
8840 if(do_strace)
8841 print_syscall_ret(num, ret);
8842 return ret;
8843 efault:
8844 ret = -TARGET_EFAULT;
8845 goto fail;
8846 }