1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include "qemu-common.h"
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
73 #ifdef CONFIG_ATTR
74 #include "qemu-xattr.h"
75 #endif
76
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
83
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/utsname.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include "linux_loop.h"
99 #include "cpu-uname.h"
100
101 #include "qemu.h"
102
103 #if defined(CONFIG_USE_NPTL)
104 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
105 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
106 #else
107 /* XXX: Hardcode the above values. */
108 #define CLONE_NPTL_FLAGS2 0
109 #endif
110
111 //#define DEBUG
112
113 //#include <linux/msdos_fs.h>
114 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
115 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
116
117
118 #undef _syscall0
119 #undef _syscall1
120 #undef _syscall2
121 #undef _syscall3
122 #undef _syscall4
123 #undef _syscall5
124 #undef _syscall6
125
126 #define _syscall0(type,name) \
127 static type name (void) \
128 { \
129 return syscall(__NR_##name); \
130 }
131
132 #define _syscall1(type,name,type1,arg1) \
133 static type name (type1 arg1) \
134 { \
135 return syscall(__NR_##name, arg1); \
136 }
137
138 #define _syscall2(type,name,type1,arg1,type2,arg2) \
139 static type name (type1 arg1,type2 arg2) \
140 { \
141 return syscall(__NR_##name, arg1, arg2); \
142 }
143
144 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
145 static type name (type1 arg1,type2 arg2,type3 arg3) \
146 { \
147 return syscall(__NR_##name, arg1, arg2, arg3); \
148 }
149
150 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
151 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
152 { \
153 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
154 }
155
156 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
157 type5,arg5) \
158 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
159 { \
160 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
161 }
162
163
164 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
165 type5,arg5,type6,arg6) \
166 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
167 type6 arg6) \
168 { \
169 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
170 }
171
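/*
 * For reference, an invocation such as
 *     _syscall2(int, sys_getpriority, int, which, int, who)
 * expands to a small static wrapper of the form
 *     static int sys_getpriority (int which, int who)
 *     {
 *         return syscall(__NR_sys_getpriority, which, who);
 *     }
 * i.e. each wrapper just forwards its arguments to the host syscall(2)
 * entry point using the __NR_* number derived from the name (the
 * __NR_sys_* aliases are defined below).
 */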
172
173 #define __NR_sys_uname __NR_uname
174 #define __NR_sys_faccessat __NR_faccessat
175 #define __NR_sys_fchmodat __NR_fchmodat
176 #define __NR_sys_fchownat __NR_fchownat
177 #define __NR_sys_fstatat64 __NR_fstatat64
178 #define __NR_sys_futimesat __NR_futimesat
179 #define __NR_sys_getcwd1 __NR_getcwd
180 #define __NR_sys_getdents __NR_getdents
181 #define __NR_sys_getdents64 __NR_getdents64
182 #define __NR_sys_getpriority __NR_getpriority
183 #define __NR_sys_linkat __NR_linkat
184 #define __NR_sys_mkdirat __NR_mkdirat
185 #define __NR_sys_mknodat __NR_mknodat
186 #define __NR_sys_newfstatat __NR_newfstatat
187 #define __NR_sys_openat __NR_openat
188 #define __NR_sys_readlinkat __NR_readlinkat
189 #define __NR_sys_renameat __NR_renameat
190 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
191 #define __NR_sys_symlinkat __NR_symlinkat
192 #define __NR_sys_syslog __NR_syslog
193 #define __NR_sys_tgkill __NR_tgkill
194 #define __NR_sys_tkill __NR_tkill
195 #define __NR_sys_unlinkat __NR_unlinkat
196 #define __NR_sys_utimensat __NR_utimensat
197 #define __NR_sys_futex __NR_futex
198 #define __NR_sys_inotify_init __NR_inotify_init
199 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
200 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
201
202 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
203 defined(__s390x__)
204 #define __NR__llseek __NR_lseek
205 #endif
206
207 #ifdef __NR_gettid
208 _syscall0(int, gettid)
209 #else
210 /* This is a replacement for the host gettid() and must return a host
211 errno. */
212 static int gettid(void) {
213 return -ENOSYS;
214 }
215 #endif
216 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
217 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
218 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
219 #endif
220 _syscall2(int, sys_getpriority, int, which, int, who);
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
223 loff_t *, res, uint, wh);
224 #endif
225 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
226 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
227 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
228 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
229 #endif
230 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
231 _syscall2(int,sys_tkill,int,tid,int,sig)
232 #endif
233 #ifdef __NR_exit_group
234 _syscall1(int,exit_group,int,error_code)
235 #endif
236 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
237 _syscall1(int,set_tid_address,int *,tidptr)
238 #endif
239 #if defined(CONFIG_USE_NPTL)
240 #if defined(TARGET_NR_futex) && defined(__NR_futex)
241 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
242 const struct timespec *,timeout,int *,uaddr2,int,val3)
243 #endif
244 #endif
245 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
246 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
247 unsigned long *, user_mask_ptr);
248 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
249 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
250 unsigned long *, user_mask_ptr);
251 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
252 void *, arg);
253
254 static bitmask_transtbl fcntl_flags_tbl[] = {
255 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
256 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
257 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
258 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
259 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
260 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
261 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
262 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
263 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
264 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
265 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
266 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
267 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
268 #if defined(O_DIRECT)
269 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
270 #endif
271 { 0, 0, 0, 0 }
272 };
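/*
 * Each entry describes one open(2)/fcntl(2) flag as
 * { target_mask, target_bits, host_mask, host_bits }.  Helpers such as
 * target_to_host_bitmask() walk this table when translating a guest flags
 * argument, so flags whose numeric values differ between the target and
 * host ABIs still round-trip correctly; the terminating all-zero entry
 * marks the end of the table.
 */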
273
274 #define COPY_UTSNAME_FIELD(dest, src) \
275 do { \
276 /* __NEW_UTS_LEN doesn't include terminating null */ \
277 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
278 (dest)[__NEW_UTS_LEN] = '\0'; \
279 } while (0)
280
281 static int sys_uname(struct new_utsname *buf)
282 {
283 struct utsname uts_buf;
284
285 if (uname(&uts_buf) < 0)
286 return (-1);
287
288 /*
289 * Just in case these have some differences, we
290 * translate utsname to new_utsname (which is the
291 * struct the Linux kernel uses).
292 */
293
294 memset(buf, 0, sizeof(*buf));
295 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
296 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
297 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
298 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
299 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
300 #ifdef _GNU_SOURCE
301 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
302 #endif
303 return (0);
304
305 #undef COPY_UTSNAME_FIELD
306 }
307
308 static int sys_getcwd1(char *buf, size_t size)
309 {
310 if (getcwd(buf, size) == NULL) {
311 /* getcwd() sets errno */
312 return (-1);
313 }
314 return strlen(buf)+1;
315 }
316
317 #ifdef CONFIG_ATFILE
318 /*
319 * The host system appears to provide atfile syscall stubs. Enable
320 * them one by one as specified by the target's syscall_nr.h.
321 */
322
323 #ifdef TARGET_NR_faccessat
324 static int sys_faccessat(int dirfd, const char *pathname, int mode)
325 {
326 return (faccessat(dirfd, pathname, mode, 0));
327 }
328 #endif
329 #ifdef TARGET_NR_fchmodat
330 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
331 {
332 return (fchmodat(dirfd, pathname, mode, 0));
333 }
334 #endif
335 #if defined(TARGET_NR_fchownat)
336 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
337 gid_t group, int flags)
338 {
339 return (fchownat(dirfd, pathname, owner, group, flags));
340 }
341 #endif
342 #ifdef __NR_fstatat64
343 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
344 int flags)
345 {
346 return (fstatat(dirfd, pathname, buf, flags));
347 }
348 #endif
349 #ifdef __NR_newfstatat
350 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
351 int flags)
352 {
353 return (fstatat(dirfd, pathname, buf, flags));
354 }
355 #endif
356 #ifdef TARGET_NR_futimesat
357 static int sys_futimesat(int dirfd, const char *pathname,
358 const struct timeval times[2])
359 {
360 return (futimesat(dirfd, pathname, times));
361 }
362 #endif
363 #ifdef TARGET_NR_linkat
364 static int sys_linkat(int olddirfd, const char *oldpath,
365 int newdirfd, const char *newpath, int flags)
366 {
367 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
368 }
369 #endif
370 #ifdef TARGET_NR_mkdirat
371 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
372 {
373 return (mkdirat(dirfd, pathname, mode));
374 }
375 #endif
376 #ifdef TARGET_NR_mknodat
377 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
378 dev_t dev)
379 {
380 return (mknodat(dirfd, pathname, mode, dev));
381 }
382 #endif
383 #ifdef TARGET_NR_openat
384 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
385 {
386 /*
387 * open(2) takes an extra 'mode' parameter when called
388 * with the O_CREAT flag.
389 */
390 if ((flags & O_CREAT) != 0) {
391 return (openat(dirfd, pathname, flags, mode));
392 }
393 return (openat(dirfd, pathname, flags));
394 }
395 #endif
396 #ifdef TARGET_NR_readlinkat
397 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
398 {
399 return (readlinkat(dirfd, pathname, buf, bufsiz));
400 }
401 #endif
402 #ifdef TARGET_NR_renameat
403 static int sys_renameat(int olddirfd, const char *oldpath,
404 int newdirfd, const char *newpath)
405 {
406 return (renameat(olddirfd, oldpath, newdirfd, newpath));
407 }
408 #endif
409 #ifdef TARGET_NR_symlinkat
410 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
411 {
412 return (symlinkat(oldpath, newdirfd, newpath));
413 }
414 #endif
415 #ifdef TARGET_NR_unlinkat
416 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
417 {
418 return (unlinkat(dirfd, pathname, flags));
419 }
420 #endif
421 #else /* !CONFIG_ATFILE */
422
423 /*
424 * Try direct syscalls instead
425 */
426 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
427 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
428 #endif
429 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
430 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
431 #endif
432 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
433 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
434 uid_t,owner,gid_t,group,int,flags)
435 #endif
436 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
437 defined(__NR_fstatat64)
438 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
439 struct stat *,buf,int,flags)
440 #endif
441 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
442 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
443 const struct timeval *,times)
444 #endif
445 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
446 defined(__NR_newfstatat)
447 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
448 struct stat *,buf,int,flags)
449 #endif
450 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
451 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
452 int,newdirfd,const char *,newpath,int,flags)
453 #endif
454 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
455 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
456 #endif
457 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
458 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
459 mode_t,mode,dev_t,dev)
460 #endif
461 #if defined(TARGET_NR_openat) && defined(__NR_openat)
462 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
463 #endif
464 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
465 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
466 char *,buf,size_t,bufsize)
467 #endif
468 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
469 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
470 int,newdirfd,const char *,newpath)
471 #endif
472 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
473 _syscall3(int,sys_symlinkat,const char *,oldpath,
474 int,newdirfd,const char *,newpath)
475 #endif
476 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
477 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
478 #endif
479
480 #endif /* CONFIG_ATFILE */
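/*
 * Summary of the CONFIG_ATFILE split above: when the host libc exposes the
 * *at() family (faccessat, openat, ...) the sys_* wrappers simply call it;
 * otherwise the raw __NR_* numbers are invoked directly through the
 * _syscallN macros.  Either way the guest-visible behaviour is the same.
 */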
481
482 #ifdef CONFIG_UTIMENSAT
483 static int sys_utimensat(int dirfd, const char *pathname,
484 const struct timespec times[2], int flags)
485 {
486 if (pathname == NULL)
487 return futimens(dirfd, times);
488 else
489 return utimensat(dirfd, pathname, times, flags);
490 }
491 #else
492 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
493 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
494 const struct timespec *,tsp,int,flags)
495 #endif
496 #endif /* CONFIG_UTIMENSAT */
497
498 #ifdef CONFIG_INOTIFY
499 #include <sys/inotify.h>
500
501 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
502 static int sys_inotify_init(void)
503 {
504 return (inotify_init());
505 }
506 #endif
507 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
508 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
509 {
510 return (inotify_add_watch(fd, pathname, mask));
511 }
512 #endif
513 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
514 static int sys_inotify_rm_watch(int fd, int32_t wd)
515 {
516 return (inotify_rm_watch(fd, wd));
517 }
518 #endif
519 #ifdef CONFIG_INOTIFY1
520 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
521 static int sys_inotify_init1(int flags)
522 {
523 return (inotify_init1(flags));
524 }
525 #endif
526 #endif
527 #else
528 /* Userspace can usually survive runtime without inotify */
529 #undef TARGET_NR_inotify_init
530 #undef TARGET_NR_inotify_init1
531 #undef TARGET_NR_inotify_add_watch
532 #undef TARGET_NR_inotify_rm_watch
533 #endif /* CONFIG_INOTIFY */
534
535 #if defined(TARGET_NR_ppoll)
536 #ifndef __NR_ppoll
537 # define __NR_ppoll -1
538 #endif
539 #define __NR_sys_ppoll __NR_ppoll
540 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
541 struct timespec *, timeout, const __sigset_t *, sigmask,
542 size_t, sigsetsize)
543 #endif
544
545 #if defined(TARGET_NR_pselect6)
546 #ifndef __NR_pselect6
547 # define __NR_pselect6 -1
548 #endif
549 #define __NR_sys_pselect6 __NR_pselect6
550 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
551 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
552 #endif
553
554 #if defined(TARGET_NR_prlimit64)
555 #ifndef __NR_prlimit64
556 # define __NR_prlimit64 -1
557 #endif
558 #define __NR_sys_prlimit64 __NR_prlimit64
559 /* The glibc rlimit structure may not match the one used by the underlying syscall */
560 struct host_rlimit64 {
561 uint64_t rlim_cur;
562 uint64_t rlim_max;
563 };
564 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
565 const struct host_rlimit64 *, new_limit,
566 struct host_rlimit64 *, old_limit)
567 #endif
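/*
 * For ppoll, pselect6 and prlimit64 the wrapper is emitted unconditionally:
 * if the host kernel headers lack the syscall number it is defined to -1
 * above, so the call simply fails with ENOSYS at run time instead of
 * breaking the build.
 */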
568
569 extern int personality(int);
570 extern int flock(int, int);
571 extern int setfsuid(int);
572 extern int setfsgid(int);
573 extern int setgroups(int, gid_t *);
574
575 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
576 #ifdef TARGET_ARM
577 static inline int regpairs_aligned(void *cpu_env) {
578 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
579 }
580 #elif defined(TARGET_MIPS)
581 static inline int regpairs_aligned(void *cpu_env) { return 1; }
582 #else
583 static inline int regpairs_aligned(void *cpu_env) { return 0; }
584 #endif
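/*
 * When regpairs_aligned() returns non-zero, 64-bit syscall arguments passed
 * in register pairs must start in an even-numbered register, so handlers of
 * syscalls taking 64-bit values (truncate64, pread64 and friends, later in
 * this file) skip one argument slot of implicit padding before assembling
 * the value.
 */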
585
586 #define ERRNO_TABLE_SIZE 1200
587
588 /* target_to_host_errno_table[] is initialized from
589 * host_to_target_errno_table[] in syscall_init(). */
590 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
591 };
592
593 /*
594 * This list is the union of errno values overridden in asm-<arch>/errno.h
595 * minus the errnos that are not actually generic to all archs.
596 */
597 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
598 [EIDRM] = TARGET_EIDRM,
599 [ECHRNG] = TARGET_ECHRNG,
600 [EL2NSYNC] = TARGET_EL2NSYNC,
601 [EL3HLT] = TARGET_EL3HLT,
602 [EL3RST] = TARGET_EL3RST,
603 [ELNRNG] = TARGET_ELNRNG,
604 [EUNATCH] = TARGET_EUNATCH,
605 [ENOCSI] = TARGET_ENOCSI,
606 [EL2HLT] = TARGET_EL2HLT,
607 [EDEADLK] = TARGET_EDEADLK,
608 [ENOLCK] = TARGET_ENOLCK,
609 [EBADE] = TARGET_EBADE,
610 [EBADR] = TARGET_EBADR,
611 [EXFULL] = TARGET_EXFULL,
612 [ENOANO] = TARGET_ENOANO,
613 [EBADRQC] = TARGET_EBADRQC,
614 [EBADSLT] = TARGET_EBADSLT,
615 [EBFONT] = TARGET_EBFONT,
616 [ENOSTR] = TARGET_ENOSTR,
617 [ENODATA] = TARGET_ENODATA,
618 [ETIME] = TARGET_ETIME,
619 [ENOSR] = TARGET_ENOSR,
620 [ENONET] = TARGET_ENONET,
621 [ENOPKG] = TARGET_ENOPKG,
622 [EREMOTE] = TARGET_EREMOTE,
623 [ENOLINK] = TARGET_ENOLINK,
624 [EADV] = TARGET_EADV,
625 [ESRMNT] = TARGET_ESRMNT,
626 [ECOMM] = TARGET_ECOMM,
627 [EPROTO] = TARGET_EPROTO,
628 [EDOTDOT] = TARGET_EDOTDOT,
629 [EMULTIHOP] = TARGET_EMULTIHOP,
630 [EBADMSG] = TARGET_EBADMSG,
631 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
632 [EOVERFLOW] = TARGET_EOVERFLOW,
633 [ENOTUNIQ] = TARGET_ENOTUNIQ,
634 [EBADFD] = TARGET_EBADFD,
635 [EREMCHG] = TARGET_EREMCHG,
636 [ELIBACC] = TARGET_ELIBACC,
637 [ELIBBAD] = TARGET_ELIBBAD,
638 [ELIBSCN] = TARGET_ELIBSCN,
639 [ELIBMAX] = TARGET_ELIBMAX,
640 [ELIBEXEC] = TARGET_ELIBEXEC,
641 [EILSEQ] = TARGET_EILSEQ,
642 [ENOSYS] = TARGET_ENOSYS,
643 [ELOOP] = TARGET_ELOOP,
644 [ERESTART] = TARGET_ERESTART,
645 [ESTRPIPE] = TARGET_ESTRPIPE,
646 [ENOTEMPTY] = TARGET_ENOTEMPTY,
647 [EUSERS] = TARGET_EUSERS,
648 [ENOTSOCK] = TARGET_ENOTSOCK,
649 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
650 [EMSGSIZE] = TARGET_EMSGSIZE,
651 [EPROTOTYPE] = TARGET_EPROTOTYPE,
652 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
653 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
654 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
655 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
656 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
657 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
658 [EADDRINUSE] = TARGET_EADDRINUSE,
659 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
660 [ENETDOWN] = TARGET_ENETDOWN,
661 [ENETUNREACH] = TARGET_ENETUNREACH,
662 [ENETRESET] = TARGET_ENETRESET,
663 [ECONNABORTED] = TARGET_ECONNABORTED,
664 [ECONNRESET] = TARGET_ECONNRESET,
665 [ENOBUFS] = TARGET_ENOBUFS,
666 [EISCONN] = TARGET_EISCONN,
667 [ENOTCONN] = TARGET_ENOTCONN,
668 [EUCLEAN] = TARGET_EUCLEAN,
669 [ENOTNAM] = TARGET_ENOTNAM,
670 [ENAVAIL] = TARGET_ENAVAIL,
671 [EISNAM] = TARGET_EISNAM,
672 [EREMOTEIO] = TARGET_EREMOTEIO,
673 [ESHUTDOWN] = TARGET_ESHUTDOWN,
674 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
675 [ETIMEDOUT] = TARGET_ETIMEDOUT,
676 [ECONNREFUSED] = TARGET_ECONNREFUSED,
677 [EHOSTDOWN] = TARGET_EHOSTDOWN,
678 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
679 [EALREADY] = TARGET_EALREADY,
680 [EINPROGRESS] = TARGET_EINPROGRESS,
681 [ESTALE] = TARGET_ESTALE,
682 [ECANCELED] = TARGET_ECANCELED,
683 [ENOMEDIUM] = TARGET_ENOMEDIUM,
684 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
685 #ifdef ENOKEY
686 [ENOKEY] = TARGET_ENOKEY,
687 #endif
688 #ifdef EKEYEXPIRED
689 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
690 #endif
691 #ifdef EKEYREVOKED
692 [EKEYREVOKED] = TARGET_EKEYREVOKED,
693 #endif
694 #ifdef EKEYREJECTED
695 [EKEYREJECTED] = TARGET_EKEYREJECTED,
696 #endif
697 #ifdef EOWNERDEAD
698 [EOWNERDEAD] = TARGET_EOWNERDEAD,
699 #endif
700 #ifdef ENOTRECOVERABLE
701 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
702 #endif
703 };
704
705 static inline int host_to_target_errno(int err)
706 {
707 if(host_to_target_errno_table[err])
708 return host_to_target_errno_table[err];
709 return err;
710 }
711
712 static inline int target_to_host_errno(int err)
713 {
714 if (target_to_host_errno_table[err])
715 return target_to_host_errno_table[err];
716 return err;
717 }
718
719 static inline abi_long get_errno(abi_long ret)
720 {
721 if (ret == -1)
722 return -host_to_target_errno(errno);
723 else
724 return ret;
725 }
726
727 static inline int is_error(abi_long ret)
728 {
729 return (abi_ulong)ret >= (abi_ulong)(-4096);
730 }
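/*
 * Convention used throughout this file: host wrappers return -1 and set
 * errno on failure, get_errno() folds that into a negative target errno
 * (e.g. a host failure with errno == ENOSYS becomes -TARGET_ENOSYS), and
 * is_error() mirrors the kernel's test that any value in [-4096, -1]
 * denotes an error code rather than a valid result.
 */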
731
732 char *target_strerror(int err)
733 {
734 return strerror(target_to_host_errno(err));
735 }
736
737 static abi_ulong target_brk;
738 static abi_ulong target_original_brk;
739 static abi_ulong brk_page;
740
741 void target_set_brk(abi_ulong new_brk)
742 {
743 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
744 brk_page = HOST_PAGE_ALIGN(target_brk);
745 }
746
747 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
748 #define DEBUGF_BRK(message, args...)
749
750 /* do_brk() must return target values and target errnos. */
751 abi_long do_brk(abi_ulong new_brk)
752 {
753 abi_long mapped_addr;
754 int new_alloc_size;
755
756 DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);
757
758 if (!new_brk) {
759 DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
760 return target_brk;
761 }
762 if (new_brk < target_original_brk) {
763 DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
764 return target_brk;
765 }
766
767 /* If the new brk is less than the highest page reserved to the
768 * target heap allocation, set it and we're almost done... */
769 if (new_brk <= brk_page) {
770 /* Heap contents are initialized to zero, as for anonymous
771 * mapped pages. */
772 if (new_brk > target_brk) {
773 memset(g2h(target_brk), 0, new_brk - target_brk);
774 }
775 target_brk = new_brk;
776 DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
777 return target_brk;
778 }
779
780 /* We need to allocate more memory after the brk... Note that
781 * we don't use MAP_FIXED because that will map over the top of
782 * any existing mapping (like the one with the host libc or qemu
783 * itself); instead we treat "mapped but at wrong address" as
784 * a failure and unmap again.
785 */
786 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
787 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
788 PROT_READ|PROT_WRITE,
789 MAP_ANON|MAP_PRIVATE, 0, 0));
790
791 if (mapped_addr == brk_page) {
792 /* Heap contents are initialized to zero, as for anonymous
793 * mapped pages. Technically the new pages are already
794 * initialized to zero since they *are* anonymous mapped
795 * pages, however we have to take care with the contents that
796 * come from the remaining part of the previous page: it may
797 * contain garbage data due to previous heap usage (grown
798 * then shrunk). */
799 memset(g2h(target_brk), 0, brk_page - target_brk);
800
801 target_brk = new_brk;
802 brk_page = HOST_PAGE_ALIGN(target_brk);
803 DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
804 return target_brk;
805 } else if (mapped_addr != -1) {
806 /* Mapped but at wrong address, meaning there wasn't actually
807 * enough space for this brk.
808 */
809 target_munmap(mapped_addr, new_alloc_size);
810 mapped_addr = -1;
811 DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
812 }
813 else {
814 DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
815 }
816
817 #if defined(TARGET_ALPHA)
818 /* We (partially) emulate OSF/1 on Alpha, which requires we
819 return a proper errno, not an unchanged brk value. */
820 return -TARGET_ENOMEM;
821 #endif
822 /* For everything else, return the previous break. */
823 return target_brk;
824 }
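/*
 * Illustrative walk-through of the growth path, assuming a 4 KiB host page
 * size: with target_brk == brk_page == 0x20000, a request for
 * new_brk == 0x23500 maps HOST_PAGE_ALIGN(0x3500) == 0x4000 bytes of fresh
 * anonymous memory at 0x20000; on success target_brk becomes 0x23500 and
 * brk_page 0x24000, otherwise the old break (or -TARGET_ENOMEM on Alpha)
 * is returned.
 */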
825
826 static inline abi_long copy_from_user_fdset(fd_set *fds,
827 abi_ulong target_fds_addr,
828 int n)
829 {
830 int i, nw, j, k;
831 abi_ulong b, *target_fds;
832
833 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
834 if (!(target_fds = lock_user(VERIFY_READ,
835 target_fds_addr,
836 sizeof(abi_ulong) * nw,
837 1)))
838 return -TARGET_EFAULT;
839
840 FD_ZERO(fds);
841 k = 0;
842 for (i = 0; i < nw; i++) {
843 /* grab the abi_ulong */
844 __get_user(b, &target_fds[i]);
845 for (j = 0; j < TARGET_ABI_BITS; j++) {
846 /* check the bit inside the abi_ulong */
847 if ((b >> j) & 1)
848 FD_SET(k, fds);
849 k++;
850 }
851 }
852
853 unlock_user(target_fds, target_fds_addr, 0);
854
855 return 0;
856 }
857
858 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
859 abi_ulong target_fds_addr,
860 int n)
861 {
862 if (target_fds_addr) {
863 if (copy_from_user_fdset(fds, target_fds_addr, n))
864 return -TARGET_EFAULT;
865 *fds_ptr = fds;
866 } else {
867 *fds_ptr = NULL;
868 }
869 return 0;
870 }
871
872 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
873 const fd_set *fds,
874 int n)
875 {
876 int i, nw, j, k;
877 abi_long v;
878 abi_ulong *target_fds;
879
880 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
881 if (!(target_fds = lock_user(VERIFY_WRITE,
882 target_fds_addr,
883 sizeof(abi_ulong) * nw,
884 0)))
885 return -TARGET_EFAULT;
886
887 k = 0;
888 for (i = 0; i < nw; i++) {
889 v = 0;
890 for (j = 0; j < TARGET_ABI_BITS; j++) {
891 v |= ((FD_ISSET(k, fds) != 0) << j);
892 k++;
893 }
894 __put_user(v, &target_fds[i]);
895 }
896
897 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
898
899 return 0;
900 }
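/*
 * The two helpers above repack fd_set bitmaps between the target layout
 * (an array of abi_ulong words, TARGET_ABI_BITS bits each, fd 0 in bit 0 of
 * word 0) and the host's opaque fd_set, one descriptor at a time via
 * FD_SET()/FD_ISSET().  For example, n == 70 with TARGET_ABI_BITS == 32
 * means nw == 3 guest words are locked and scanned.
 */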
901
902 #if defined(__alpha__)
903 #define HOST_HZ 1024
904 #else
905 #define HOST_HZ 100
906 #endif
907
908 static inline abi_long host_to_target_clock_t(long ticks)
909 {
910 #if HOST_HZ == TARGET_HZ
911 return ticks;
912 #else
913 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
914 #endif
915 }
916
917 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
918 const struct rusage *rusage)
919 {
920 struct target_rusage *target_rusage;
921
922 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
923 return -TARGET_EFAULT;
924 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
925 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
926 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
927 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
928 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
929 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
930 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
931 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
932 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
933 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
934 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
935 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
936 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
937 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
938 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
939 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
940 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
941 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
942 unlock_user_struct(target_rusage, target_addr, 1);
943
944 return 0;
945 }
946
947 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
948 {
949 abi_ulong target_rlim_swap;
950 rlim_t result;
951
952 target_rlim_swap = tswapal(target_rlim);
953 if (target_rlim_swap == TARGET_RLIM_INFINITY)
954 return RLIM_INFINITY;
955
956 result = target_rlim_swap;
957 if (target_rlim_swap != (rlim_t)result)
958 return RLIM_INFINITY;
959
960 return result;
961 }
962
963 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
964 {
965 abi_ulong target_rlim_swap;
966 abi_ulong result;
967
968 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
969 target_rlim_swap = TARGET_RLIM_INFINITY;
970 else
971 target_rlim_swap = rlim;
972 result = tswapal(target_rlim_swap);
973
974 return result;
975 }
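/*
 * Note the clamping in both directions above: a host rlim_t value too large
 * for the target's abi_long is reported as TARGET_RLIM_INFINITY, and a
 * target value that cannot be represented in the host rlim_t becomes
 * RLIM_INFINITY, so "unlimited" survives 32/64-bit ABI boundaries intact.
 */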
976
977 static inline int target_to_host_resource(int code)
978 {
979 switch (code) {
980 case TARGET_RLIMIT_AS:
981 return RLIMIT_AS;
982 case TARGET_RLIMIT_CORE:
983 return RLIMIT_CORE;
984 case TARGET_RLIMIT_CPU:
985 return RLIMIT_CPU;
986 case TARGET_RLIMIT_DATA:
987 return RLIMIT_DATA;
988 case TARGET_RLIMIT_FSIZE:
989 return RLIMIT_FSIZE;
990 case TARGET_RLIMIT_LOCKS:
991 return RLIMIT_LOCKS;
992 case TARGET_RLIMIT_MEMLOCK:
993 return RLIMIT_MEMLOCK;
994 case TARGET_RLIMIT_MSGQUEUE:
995 return RLIMIT_MSGQUEUE;
996 case TARGET_RLIMIT_NICE:
997 return RLIMIT_NICE;
998 case TARGET_RLIMIT_NOFILE:
999 return RLIMIT_NOFILE;
1000 case TARGET_RLIMIT_NPROC:
1001 return RLIMIT_NPROC;
1002 case TARGET_RLIMIT_RSS:
1003 return RLIMIT_RSS;
1004 case TARGET_RLIMIT_RTPRIO:
1005 return RLIMIT_RTPRIO;
1006 case TARGET_RLIMIT_SIGPENDING:
1007 return RLIMIT_SIGPENDING;
1008 case TARGET_RLIMIT_STACK:
1009 return RLIMIT_STACK;
1010 default:
1011 return code;
1012 }
1013 }
1014
1015 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1016 abi_ulong target_tv_addr)
1017 {
1018 struct target_timeval *target_tv;
1019
1020 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1021 return -TARGET_EFAULT;
1022
1023 __get_user(tv->tv_sec, &target_tv->tv_sec);
1024 __get_user(tv->tv_usec, &target_tv->tv_usec);
1025
1026 unlock_user_struct(target_tv, target_tv_addr, 0);
1027
1028 return 0;
1029 }
1030
1031 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1032 const struct timeval *tv)
1033 {
1034 struct target_timeval *target_tv;
1035
1036 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1037 return -TARGET_EFAULT;
1038
1039 __put_user(tv->tv_sec, &target_tv->tv_sec);
1040 __put_user(tv->tv_usec, &target_tv->tv_usec);
1041
1042 unlock_user_struct(target_tv, target_tv_addr, 1);
1043
1044 return 0;
1045 }
1046
1047 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1048 #include <mqueue.h>
1049
1050 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1051 abi_ulong target_mq_attr_addr)
1052 {
1053 struct target_mq_attr *target_mq_attr;
1054
1055 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1056 target_mq_attr_addr, 1))
1057 return -TARGET_EFAULT;
1058
1059 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1060 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1061 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1062 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1063
1064 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1065
1066 return 0;
1067 }
1068
1069 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1070 const struct mq_attr *attr)
1071 {
1072 struct target_mq_attr *target_mq_attr;
1073
1074 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1075 target_mq_attr_addr, 0))
1076 return -TARGET_EFAULT;
1077
1078 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1079 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1080 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1081 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1082
1083 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1084
1085 return 0;
1086 }
1087 #endif
1088
1089 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1090 /* do_select() must return target values and target errnos. */
1091 static abi_long do_select(int n,
1092 abi_ulong rfd_addr, abi_ulong wfd_addr,
1093 abi_ulong efd_addr, abi_ulong target_tv_addr)
1094 {
1095 fd_set rfds, wfds, efds;
1096 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1097 struct timeval tv, *tv_ptr;
1098 abi_long ret;
1099
1100 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1101 if (ret) {
1102 return ret;
1103 }
1104 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1105 if (ret) {
1106 return ret;
1107 }
1108 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1109 if (ret) {
1110 return ret;
1111 }
1112
1113 if (target_tv_addr) {
1114 if (copy_from_user_timeval(&tv, target_tv_addr))
1115 return -TARGET_EFAULT;
1116 tv_ptr = &tv;
1117 } else {
1118 tv_ptr = NULL;
1119 }
1120
1121 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1122
1123 if (!is_error(ret)) {
1124 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1125 return -TARGET_EFAULT;
1126 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1127 return -TARGET_EFAULT;
1128 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1129 return -TARGET_EFAULT;
1130
1131 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1132 return -TARGET_EFAULT;
1133 }
1134
1135 return ret;
1136 }
1137 #endif
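/*
 * do_select() thus emulates select(2) purely in terms of the host call: the
 * guest fd_set and timeval images are converted in, host select() runs on
 * the guest's descriptor numbers directly, and the possibly-modified sets
 * and remaining timeout are copied back before the result is returned as a
 * target value or target errno.
 */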
1138
1139 static abi_long do_pipe2(int host_pipe[], int flags)
1140 {
1141 #ifdef CONFIG_PIPE2
1142 return pipe2(host_pipe, flags);
1143 #else
1144 return -ENOSYS;
1145 #endif
1146 }
1147
1148 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1149 int flags, int is_pipe2)
1150 {
1151 int host_pipe[2];
1152 abi_long ret;
1153 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1154
1155 if (is_error(ret))
1156 return get_errno(ret);
1157
1158 /* Several targets have special calling conventions for the original
1159 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1160 if (!is_pipe2) {
1161 #if defined(TARGET_ALPHA)
1162 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1163 return host_pipe[0];
1164 #elif defined(TARGET_MIPS)
1165 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1166 return host_pipe[0];
1167 #elif defined(TARGET_SH4)
1168 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1169 return host_pipe[0];
1170 #endif
1171 }
1172
1173 if (put_user_s32(host_pipe[0], pipedes)
1174 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1175 return -TARGET_EFAULT;
1176 return get_errno(ret);
1177 }
1178
1179 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1180 abi_ulong target_addr,
1181 socklen_t len)
1182 {
1183 struct target_ip_mreqn *target_smreqn;
1184
1185 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1186 if (!target_smreqn)
1187 return -TARGET_EFAULT;
1188 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1189 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1190 if (len == sizeof(struct target_ip_mreqn))
1191 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1192 unlock_user(target_smreqn, target_addr, 0);
1193
1194 return 0;
1195 }
1196
1197 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1198 abi_ulong target_addr,
1199 socklen_t len)
1200 {
1201 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1202 sa_family_t sa_family;
1203 struct target_sockaddr *target_saddr;
1204
1205 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1206 if (!target_saddr)
1207 return -TARGET_EFAULT;
1208
1209 sa_family = tswap16(target_saddr->sa_family);
1210
1211 /* Oops. The caller might send an incomplete sun_path; sun_path
1212 * must be terminated by \0 (see the manual page), but
1213 * unfortunately it is quite common to specify sockaddr_un
1214 * length as "strlen(x->sun_path)" while it should be
1215 * "strlen(...) + 1". We'll fix that here if needed.
1216 * Linux kernel has a similar feature.
1217 */
1218
1219 if (sa_family == AF_UNIX) {
1220 if (len < unix_maxlen && len > 0) {
1221 char *cp = (char*)target_saddr;
1222
1223 if ( cp[len-1] && !cp[len] )
1224 len++;
1225 }
1226 if (len > unix_maxlen)
1227 len = unix_maxlen;
1228 }
1229
1230 memcpy(addr, target_saddr, len);
1231 addr->sa_family = sa_family;
1232 unlock_user(target_saddr, target_addr, 0);
1233
1234 return 0;
1235 }
1236
1237 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1238 struct sockaddr *addr,
1239 socklen_t len)
1240 {
1241 struct target_sockaddr *target_saddr;
1242
1243 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1244 if (!target_saddr)
1245 return -TARGET_EFAULT;
1246 memcpy(target_saddr, addr, len);
1247 target_saddr->sa_family = tswap16(addr->sa_family);
1248 unlock_user(target_saddr, target_addr, len);
1249
1250 return 0;
1251 }
1252
1253 /* ??? Should this also swap msgh->name? */
1254 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1255 struct target_msghdr *target_msgh)
1256 {
1257 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1258 abi_long msg_controllen;
1259 abi_ulong target_cmsg_addr;
1260 struct target_cmsghdr *target_cmsg;
1261 socklen_t space = 0;
1262
1263 msg_controllen = tswapal(target_msgh->msg_controllen);
1264 if (msg_controllen < sizeof (struct target_cmsghdr))
1265 goto the_end;
1266 target_cmsg_addr = tswapal(target_msgh->msg_control);
1267 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1268 if (!target_cmsg)
1269 return -TARGET_EFAULT;
1270
1271 while (cmsg && target_cmsg) {
1272 void *data = CMSG_DATA(cmsg);
1273 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1274
1275 int len = tswapal(target_cmsg->cmsg_len)
1276 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1277
1278 space += CMSG_SPACE(len);
1279 if (space > msgh->msg_controllen) {
1280 space -= CMSG_SPACE(len);
1281 gemu_log("Host cmsg overflow\n");
1282 break;
1283 }
1284
1285 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1286 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1287 cmsg->cmsg_len = CMSG_LEN(len);
1288
1289 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1290 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1291 memcpy(data, target_data, len);
1292 } else {
1293 int *fd = (int *)data;
1294 int *target_fd = (int *)target_data;
1295 int i, numfds = len / sizeof(int);
1296
1297 for (i = 0; i < numfds; i++)
1298 fd[i] = tswap32(target_fd[i]);
1299 }
1300
1301 cmsg = CMSG_NXTHDR(msgh, cmsg);
1302 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1303 }
1304 unlock_user(target_cmsg, target_cmsg_addr, 0);
1305 the_end:
1306 msgh->msg_controllen = space;
1307 return 0;
1308 }
1309
1310 /* ??? Should this also swap msgh->name? */
1311 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1312 struct msghdr *msgh)
1313 {
1314 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1315 abi_long msg_controllen;
1316 abi_ulong target_cmsg_addr;
1317 struct target_cmsghdr *target_cmsg;
1318 socklen_t space = 0;
1319
1320 msg_controllen = tswapal(target_msgh->msg_controllen);
1321 if (msg_controllen < sizeof (struct target_cmsghdr))
1322 goto the_end;
1323 target_cmsg_addr = tswapal(target_msgh->msg_control);
1324 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1325 if (!target_cmsg)
1326 return -TARGET_EFAULT;
1327
1328 while (cmsg && target_cmsg) {
1329 void *data = CMSG_DATA(cmsg);
1330 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1331
1332 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1333
1334 space += TARGET_CMSG_SPACE(len);
1335 if (space > msg_controllen) {
1336 space -= TARGET_CMSG_SPACE(len);
1337 gemu_log("Target cmsg overflow\n");
1338 break;
1339 }
1340
1341 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1342 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1343 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1344
1345 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1346 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1347 memcpy(target_data, data, len);
1348 } else {
1349 int *fd = (int *)data;
1350 int *target_fd = (int *)target_data;
1351 int i, numfds = len / sizeof(int);
1352
1353 for (i = 0; i < numfds; i++)
1354 target_fd[i] = tswap32(fd[i]);
1355 }
1356
1357 cmsg = CMSG_NXTHDR(msgh, cmsg);
1358 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1359 }
1360 unlock_user(target_cmsg, target_cmsg_addr, space);
1361 the_end:
1362 target_msgh->msg_controllen = tswapal(space);
1363 return 0;
1364 }
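/*
 * Both cmsg converters above walk the control buffers in lock step with
 * CMSG_NXTHDR()/TARGET_CMSG_NXTHDR().  Only SCM_RIGHTS at socket level is
 * really translated (each 32-bit file descriptor is byte-swapped
 * individually); any other ancillary payload is copied through verbatim
 * with a warning, which only works when the representations happen to
 * match.
 */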
1365
1366 /* do_setsockopt() Must return target values and target errnos. */
1367 static abi_long do_setsockopt(int sockfd, int level, int optname,
1368 abi_ulong optval_addr, socklen_t optlen)
1369 {
1370 abi_long ret;
1371 int val;
1372 struct ip_mreqn *ip_mreq;
1373 struct ip_mreq_source *ip_mreq_source;
1374
1375 switch(level) {
1376 case SOL_TCP:
1377 /* TCP options all take an 'int' value. */
1378 if (optlen < sizeof(uint32_t))
1379 return -TARGET_EINVAL;
1380
1381 if (get_user_u32(val, optval_addr))
1382 return -TARGET_EFAULT;
1383 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1384 break;
1385 case SOL_IP:
1386 switch(optname) {
1387 case IP_TOS:
1388 case IP_TTL:
1389 case IP_HDRINCL:
1390 case IP_ROUTER_ALERT:
1391 case IP_RECVOPTS:
1392 case IP_RETOPTS:
1393 case IP_PKTINFO:
1394 case IP_MTU_DISCOVER:
1395 case IP_RECVERR:
1396 case IP_RECVTOS:
1397 #ifdef IP_FREEBIND
1398 case IP_FREEBIND:
1399 #endif
1400 case IP_MULTICAST_TTL:
1401 case IP_MULTICAST_LOOP:
1402 val = 0;
1403 if (optlen >= sizeof(uint32_t)) {
1404 if (get_user_u32(val, optval_addr))
1405 return -TARGET_EFAULT;
1406 } else if (optlen >= 1) {
1407 if (get_user_u8(val, optval_addr))
1408 return -TARGET_EFAULT;
1409 }
1410 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1411 break;
1412 case IP_ADD_MEMBERSHIP:
1413 case IP_DROP_MEMBERSHIP:
1414 if (optlen < sizeof (struct target_ip_mreq) ||
1415 optlen > sizeof (struct target_ip_mreqn))
1416 return -TARGET_EINVAL;
1417
1418 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1419 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1420 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1421 break;
1422
1423 case IP_BLOCK_SOURCE:
1424 case IP_UNBLOCK_SOURCE:
1425 case IP_ADD_SOURCE_MEMBERSHIP:
1426 case IP_DROP_SOURCE_MEMBERSHIP:
1427 if (optlen != sizeof (struct target_ip_mreq_source))
1428 return -TARGET_EINVAL;
1429
1430 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1431 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1432 unlock_user (ip_mreq_source, optval_addr, 0);
1433 break;
1434
1435 default:
1436 goto unimplemented;
1437 }
1438 break;
1439 case TARGET_SOL_SOCKET:
1440 switch (optname) {
1441 /* Options with 'int' argument. */
1442 case TARGET_SO_DEBUG:
1443 optname = SO_DEBUG;
1444 break;
1445 case TARGET_SO_REUSEADDR:
1446 optname = SO_REUSEADDR;
1447 break;
1448 case TARGET_SO_TYPE:
1449 optname = SO_TYPE;
1450 break;
1451 case TARGET_SO_ERROR:
1452 optname = SO_ERROR;
1453 break;
1454 case TARGET_SO_DONTROUTE:
1455 optname = SO_DONTROUTE;
1456 break;
1457 case TARGET_SO_BROADCAST:
1458 optname = SO_BROADCAST;
1459 break;
1460 case TARGET_SO_SNDBUF:
1461 optname = SO_SNDBUF;
1462 break;
1463 case TARGET_SO_RCVBUF:
1464 optname = SO_RCVBUF;
1465 break;
1466 case TARGET_SO_KEEPALIVE:
1467 optname = SO_KEEPALIVE;
1468 break;
1469 case TARGET_SO_OOBINLINE:
1470 optname = SO_OOBINLINE;
1471 break;
1472 case TARGET_SO_NO_CHECK:
1473 optname = SO_NO_CHECK;
1474 break;
1475 case TARGET_SO_PRIORITY:
1476 optname = SO_PRIORITY;
1477 break;
1478 #ifdef SO_BSDCOMPAT
1479 case TARGET_SO_BSDCOMPAT:
1480 optname = SO_BSDCOMPAT;
1481 break;
1482 #endif
1483 case TARGET_SO_PASSCRED:
1484 optname = SO_PASSCRED;
1485 break;
1486 case TARGET_SO_TIMESTAMP:
1487 optname = SO_TIMESTAMP;
1488 break;
1489 case TARGET_SO_RCVLOWAT:
1490 optname = SO_RCVLOWAT;
1491 break;
1492 case TARGET_SO_RCVTIMEO:
1493 optname = SO_RCVTIMEO;
1494 break;
1495 case TARGET_SO_SNDTIMEO:
1496 optname = SO_SNDTIMEO;
1497 break;
1498 break;
1499 default:
1500 goto unimplemented;
1501 }
1502 if (optlen < sizeof(uint32_t))
1503 return -TARGET_EINVAL;
1504
1505 if (get_user_u32(val, optval_addr))
1506 return -TARGET_EFAULT;
1507 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1508 break;
1509 default:
1510 unimplemented:
1511 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1512 ret = -TARGET_ENOPROTOOPT;
1513 }
1514 return ret;
1515 }
1516
1517 /* do_getsockopt() Must return target values and target errnos. */
1518 static abi_long do_getsockopt(int sockfd, int level, int optname,
1519 abi_ulong optval_addr, abi_ulong optlen)
1520 {
1521 abi_long ret;
1522 int len, val;
1523 socklen_t lv;
1524
1525 switch(level) {
1526 case TARGET_SOL_SOCKET:
1527 level = SOL_SOCKET;
1528 switch (optname) {
1529 /* These don't just return a single integer */
1530 case TARGET_SO_LINGER:
1531 case TARGET_SO_RCVTIMEO:
1532 case TARGET_SO_SNDTIMEO:
1533 case TARGET_SO_PEERCRED:
1534 case TARGET_SO_PEERNAME:
1535 goto unimplemented;
1536 /* Options with 'int' argument. */
1537 case TARGET_SO_DEBUG:
1538 optname = SO_DEBUG;
1539 goto int_case;
1540 case TARGET_SO_REUSEADDR:
1541 optname = SO_REUSEADDR;
1542 goto int_case;
1543 case TARGET_SO_TYPE:
1544 optname = SO_TYPE;
1545 goto int_case;
1546 case TARGET_SO_ERROR:
1547 optname = SO_ERROR;
1548 goto int_case;
1549 case TARGET_SO_DONTROUTE:
1550 optname = SO_DONTROUTE;
1551 goto int_case;
1552 case TARGET_SO_BROADCAST:
1553 optname = SO_BROADCAST;
1554 goto int_case;
1555 case TARGET_SO_SNDBUF:
1556 optname = SO_SNDBUF;
1557 goto int_case;
1558 case TARGET_SO_RCVBUF:
1559 optname = SO_RCVBUF;
1560 goto int_case;
1561 case TARGET_SO_KEEPALIVE:
1562 optname = SO_KEEPALIVE;
1563 goto int_case;
1564 case TARGET_SO_OOBINLINE:
1565 optname = SO_OOBINLINE;
1566 goto int_case;
1567 case TARGET_SO_NO_CHECK:
1568 optname = SO_NO_CHECK;
1569 goto int_case;
1570 case TARGET_SO_PRIORITY:
1571 optname = SO_PRIORITY;
1572 goto int_case;
1573 #ifdef SO_BSDCOMPAT
1574 case TARGET_SO_BSDCOMPAT:
1575 optname = SO_BSDCOMPAT;
1576 goto int_case;
1577 #endif
1578 case TARGET_SO_PASSCRED:
1579 optname = SO_PASSCRED;
1580 goto int_case;
1581 case TARGET_SO_TIMESTAMP:
1582 optname = SO_TIMESTAMP;
1583 goto int_case;
1584 case TARGET_SO_RCVLOWAT:
1585 optname = SO_RCVLOWAT;
1586 goto int_case;
1587 default:
1588 goto int_case;
1589 }
1590 break;
1591 case SOL_TCP:
1592 /* TCP options all take an 'int' value. */
1593 int_case:
1594 if (get_user_u32(len, optlen))
1595 return -TARGET_EFAULT;
1596 if (len < 0)
1597 return -TARGET_EINVAL;
1598 lv = sizeof(lv);
1599 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1600 if (ret < 0)
1601 return ret;
1602 if (len > lv)
1603 len = lv;
1604 if (len == 4) {
1605 if (put_user_u32(val, optval_addr))
1606 return -TARGET_EFAULT;
1607 } else {
1608 if (put_user_u8(val, optval_addr))
1609 return -TARGET_EFAULT;
1610 }
1611 if (put_user_u32(len, optlen))
1612 return -TARGET_EFAULT;
1613 break;
1614 case SOL_IP:
1615 switch(optname) {
1616 case IP_TOS:
1617 case IP_TTL:
1618 case IP_HDRINCL:
1619 case IP_ROUTER_ALERT:
1620 case IP_RECVOPTS:
1621 case IP_RETOPTS:
1622 case IP_PKTINFO:
1623 case IP_MTU_DISCOVER:
1624 case IP_RECVERR:
1625 case IP_RECVTOS:
1626 #ifdef IP_FREEBIND
1627 case IP_FREEBIND:
1628 #endif
1629 case IP_MULTICAST_TTL:
1630 case IP_MULTICAST_LOOP:
1631 if (get_user_u32(len, optlen))
1632 return -TARGET_EFAULT;
1633 if (len < 0)
1634 return -TARGET_EINVAL;
1635 lv = sizeof(lv);
1636 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1637 if (ret < 0)
1638 return ret;
1639 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1640 len = 1;
1641 if (put_user_u32(len, optlen)
1642 || put_user_u8(val, optval_addr))
1643 return -TARGET_EFAULT;
1644 } else {
1645 if (len > sizeof(int))
1646 len = sizeof(int);
1647 if (put_user_u32(len, optlen)
1648 || put_user_u32(val, optval_addr))
1649 return -TARGET_EFAULT;
1650 }
1651 break;
1652 default:
1653 ret = -TARGET_ENOPROTOOPT;
1654 break;
1655 }
1656 break;
1657 default:
1658 unimplemented:
1659 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1660 level, optname);
1661 ret = -TARGET_EOPNOTSUPP;
1662 break;
1663 }
1664 return ret;
1665 }
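/*
 * Sketch of the translation done by the two functions above: a guest
 * setsockopt(fd, TARGET_SOL_SOCKET, TARGET_SO_REUSEADDR, &one, 4) is
 * rewritten to the host (SOL_SOCKET, SO_REUSEADDR) pair with the value read
 * via get_user_u32(), while IP- and TCP-level integer options are passed
 * through unchanged on the assumption that their numeric values match
 * between target and host.
 */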
1666
1667 /* FIXME
1668 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1669 * other lock functions have a return code of 0 for failure.
1670 */
1671 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1672 int count, int copy)
1673 {
1674 struct target_iovec *target_vec;
1675 abi_ulong base;
1676 int i;
1677
1678 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1679 if (!target_vec)
1680 return -TARGET_EFAULT;
1681 for(i = 0;i < count; i++) {
1682 base = tswapal(target_vec[i].iov_base);
1683 vec[i].iov_len = tswapal(target_vec[i].iov_len);
1684 if (vec[i].iov_len != 0) {
1685 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1686 /* Don't check lock_user return value. We must call writev even
1687 if an element has an invalid base address. */
1688 } else {
1689 /* zero length pointer is ignored */
1690 vec[i].iov_base = NULL;
1691 }
1692 }
1693 unlock_user (target_vec, target_addr, 0);
1694 return 0;
1695 }
1696
1697 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1698 int count, int copy)
1699 {
1700 struct target_iovec *target_vec;
1701 abi_ulong base;
1702 int i;
1703
1704 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1705 if (!target_vec)
1706 return -TARGET_EFAULT;
1707 for(i = 0;i < count; i++) {
1708 if (target_vec[i].iov_base) {
1709 base = tswapal(target_vec[i].iov_base);
1710 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1711 }
1712 }
1713 unlock_user (target_vec, target_addr, 0);
1714
1715 return 0;
1716 }
1717
1718 /* do_socket() Must return target values and target errnos. */
1719 static abi_long do_socket(int domain, int type, int protocol)
1720 {
1721 #if defined(TARGET_MIPS)
1722 switch(type) {
1723 case TARGET_SOCK_DGRAM:
1724 type = SOCK_DGRAM;
1725 break;
1726 case TARGET_SOCK_STREAM:
1727 type = SOCK_STREAM;
1728 break;
1729 case TARGET_SOCK_RAW:
1730 type = SOCK_RAW;
1731 break;
1732 case TARGET_SOCK_RDM:
1733 type = SOCK_RDM;
1734 break;
1735 case TARGET_SOCK_SEQPACKET:
1736 type = SOCK_SEQPACKET;
1737 break;
1738 case TARGET_SOCK_PACKET:
1739 type = SOCK_PACKET;
1740 break;
1741 }
1742 #endif
1743 if (domain == PF_NETLINK)
1744 return -TARGET_EAFNOSUPPORT; /* NETLINK socket connections are not possible */
1745 return get_errno(socket(domain, type, protocol));
1746 }
1747
1748 /* do_bind() Must return target values and target errnos. */
1749 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1750 socklen_t addrlen)
1751 {
1752 void *addr;
1753 abi_long ret;
1754
1755 if ((int)addrlen < 0) {
1756 return -TARGET_EINVAL;
1757 }
1758
1759 addr = alloca(addrlen+1);
1760
1761 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1762 if (ret)
1763 return ret;
1764
1765 return get_errno(bind(sockfd, addr, addrlen));
1766 }
1767
1768 /* do_connect() Must return target values and target errnos. */
1769 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1770 socklen_t addrlen)
1771 {
1772 void *addr;
1773 abi_long ret;
1774
1775 if ((int)addrlen < 0) {
1776 return -TARGET_EINVAL;
1777 }
1778
1779 addr = alloca(addrlen);
1780
1781 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1782 if (ret)
1783 return ret;
1784
1785 return get_errno(connect(sockfd, addr, addrlen));
1786 }
1787
1788 /* do_sendrecvmsg() Must return target values and target errnos. */
1789 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1790 int flags, int send)
1791 {
1792 abi_long ret, len;
1793 struct target_msghdr *msgp;
1794 struct msghdr msg;
1795 int count;
1796 struct iovec *vec;
1797 abi_ulong target_vec;
1798
1799 /* FIXME */
1800 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1801 msgp,
1802 target_msg,
1803 send ? 1 : 0))
1804 return -TARGET_EFAULT;
1805 if (msgp->msg_name) {
1806 msg.msg_namelen = tswap32(msgp->msg_namelen);
1807 msg.msg_name = alloca(msg.msg_namelen);
1808 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1809 msg.msg_namelen);
1810 if (ret) {
1811 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1812 return ret;
1813 }
1814 } else {
1815 msg.msg_name = NULL;
1816 msg.msg_namelen = 0;
1817 }
1818 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1819 msg.msg_control = alloca(msg.msg_controllen);
1820 msg.msg_flags = tswap32(msgp->msg_flags);
1821
1822 count = tswapal(msgp->msg_iovlen);
1823 vec = alloca(count * sizeof(struct iovec));
1824 target_vec = tswapal(msgp->msg_iov);
1825 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1826 msg.msg_iovlen = count;
1827 msg.msg_iov = vec;
1828
1829 if (send) {
1830 ret = target_to_host_cmsg(&msg, msgp);
1831 if (ret == 0)
1832 ret = get_errno(sendmsg(fd, &msg, flags));
1833 } else {
1834 ret = get_errno(recvmsg(fd, &msg, flags));
1835 if (!is_error(ret)) {
1836 len = ret;
1837 ret = host_to_target_cmsg(msgp, &msg);
1838 if (!is_error(ret))
1839 ret = len;
1840 }
1841 }
1842 unlock_iovec(vec, target_vec, count, !send);
1843 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1844 return ret;
1845 }
1846
1847 /* do_accept() Must return target values and target errnos. */
1848 static abi_long do_accept(int fd, abi_ulong target_addr,
1849 abi_ulong target_addrlen_addr)
1850 {
1851 socklen_t addrlen;
1852 void *addr;
1853 abi_long ret;
1854
1855 if (target_addr == 0)
1856 return get_errno(accept(fd, NULL, NULL));
1857
1858 /* linux returns EINVAL if addrlen pointer is invalid */
1859 if (get_user_u32(addrlen, target_addrlen_addr))
1860 return -TARGET_EINVAL;
1861
1862 if ((int)addrlen < 0) {
1863 return -TARGET_EINVAL;
1864 }
1865
1866 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1867 return -TARGET_EINVAL;
1868
1869 addr = alloca(addrlen);
1870
1871 ret = get_errno(accept(fd, addr, &addrlen));
1872 if (!is_error(ret)) {
1873 host_to_target_sockaddr(target_addr, addr, addrlen);
1874 if (put_user_u32(addrlen, target_addrlen_addr))
1875 ret = -TARGET_EFAULT;
1876 }
1877 return ret;
1878 }
1879
1880 /* do_getpeername() must return target values and target errnos. */
1881 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1882 abi_ulong target_addrlen_addr)
1883 {
1884 socklen_t addrlen;
1885 void *addr;
1886 abi_long ret;
1887
1888 if (get_user_u32(addrlen, target_addrlen_addr))
1889 return -TARGET_EFAULT;
1890
1891 if ((int)addrlen < 0) {
1892 return -TARGET_EINVAL;
1893 }
1894
1895 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1896 return -TARGET_EFAULT;
1897
1898 addr = alloca(addrlen);
1899
1900 ret = get_errno(getpeername(fd, addr, &addrlen));
1901 if (!is_error(ret)) {
1902 host_to_target_sockaddr(target_addr, addr, addrlen);
1903 if (put_user_u32(addrlen, target_addrlen_addr))
1904 ret = -TARGET_EFAULT;
1905 }
1906 return ret;
1907 }
1908
1909 /* do_getsockname() must return target values and target errnos. */
1910 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1911 abi_ulong target_addrlen_addr)
1912 {
1913 socklen_t addrlen;
1914 void *addr;
1915 abi_long ret;
1916
1917 if (get_user_u32(addrlen, target_addrlen_addr))
1918 return -TARGET_EFAULT;
1919
1920 if ((int)addrlen < 0) {
1921 return -TARGET_EINVAL;
1922 }
1923
1924 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1925 return -TARGET_EFAULT;
1926
1927 addr = alloca(addrlen);
1928
1929 ret = get_errno(getsockname(fd, addr, &addrlen));
1930 if (!is_error(ret)) {
1931 host_to_target_sockaddr(target_addr, addr, addrlen);
1932 if (put_user_u32(addrlen, target_addrlen_addr))
1933 ret = -TARGET_EFAULT;
1934 }
1935 return ret;
1936 }
1937
1938 /* do_socketpair() must return target values and target errnos. */
1939 static abi_long do_socketpair(int domain, int type, int protocol,
1940 abi_ulong target_tab_addr)
1941 {
1942 int tab[2];
1943 abi_long ret;
1944
1945 ret = get_errno(socketpair(domain, type, protocol, tab));
1946 if (!is_error(ret)) {
1947 if (put_user_s32(tab[0], target_tab_addr)
1948 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1949 ret = -TARGET_EFAULT;
1950 }
1951 return ret;
1952 }
1953
1954 /* do_sendto() must return target values and target errnos. */
1955 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1956 abi_ulong target_addr, socklen_t addrlen)
1957 {
1958 void *addr;
1959 void *host_msg;
1960 abi_long ret;
1961
1962 if ((int)addrlen < 0) {
1963 return -TARGET_EINVAL;
1964 }
1965
1966 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1967 if (!host_msg)
1968 return -TARGET_EFAULT;
1969 if (target_addr) {
1970 addr = alloca(addrlen);
1971 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1972 if (ret) {
1973 unlock_user(host_msg, msg, 0);
1974 return ret;
1975 }
1976 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1977 } else {
1978 ret = get_errno(send(fd, host_msg, len, flags));
1979 }
1980 unlock_user(host_msg, msg, 0);
1981 return ret;
1982 }
1983
1984 /* do_recvfrom() must return target values and target errnos. */
1985 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1986 abi_ulong target_addr,
1987 abi_ulong target_addrlen)
1988 {
1989 socklen_t addrlen;
1990 void *addr;
1991 void *host_msg;
1992 abi_long ret;
1993
1994 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1995 if (!host_msg)
1996 return -TARGET_EFAULT;
1997 if (target_addr) {
1998 if (get_user_u32(addrlen, target_addrlen)) {
1999 ret = -TARGET_EFAULT;
2000 goto fail;
2001 }
2002 if ((int)addrlen < 0) {
2003 ret = -TARGET_EINVAL;
2004 goto fail;
2005 }
2006 addr = alloca(addrlen);
2007 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2008 } else {
2009 addr = NULL; /* To keep compiler quiet. */
2010 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2011 }
2012 if (!is_error(ret)) {
2013 if (target_addr) {
2014 host_to_target_sockaddr(target_addr, addr, addrlen);
2015 if (put_user_u32(addrlen, target_addrlen)) {
2016 ret = -TARGET_EFAULT;
2017 goto fail;
2018 }
2019 }
2020 unlock_user(host_msg, msg, len);
2021 } else {
2022 fail:
2023 unlock_user(host_msg, msg, 0);
2024 }
2025 return ret;
2026 }
2027
2028 #ifdef TARGET_NR_socketcall
2029 /* do_socketcall() must return target values and target errnos. */
2030 static abi_long do_socketcall(int num, abi_ulong vptr)
2031 {
2032 abi_long ret;
2033 const int n = sizeof(abi_ulong);
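    /*
     * socketcall(2) passes its operands as an array of abi_ulong values at
     * guest address vptr; n is the stride, so operand k lives at vptr + k * n.
     * For example, SOCKOP_bind receives { sockfd, target_addr, addrlen } as
     * three consecutive words.
     */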
2034
2035 switch(num) {
2036 case SOCKOP_socket:
2037 {
2038 abi_ulong domain, type, protocol;
2039
2040 if (get_user_ual(domain, vptr)
2041 || get_user_ual(type, vptr + n)
2042 || get_user_ual(protocol, vptr + 2 * n))
2043 return -TARGET_EFAULT;
2044
2045 ret = do_socket(domain, type, protocol);
2046 }
2047 break;
2048 case SOCKOP_bind:
2049 {
2050 abi_ulong sockfd;
2051 abi_ulong target_addr;
2052 socklen_t addrlen;
2053
2054 if (get_user_ual(sockfd, vptr)
2055 || get_user_ual(target_addr, vptr + n)
2056 || get_user_ual(addrlen, vptr + 2 * n))
2057 return -TARGET_EFAULT;
2058
2059 ret = do_bind(sockfd, target_addr, addrlen);
2060 }
2061 break;
2062 case SOCKOP_connect:
2063 {
2064 abi_ulong sockfd;
2065 abi_ulong target_addr;
2066 socklen_t addrlen;
2067
2068 if (get_user_ual(sockfd, vptr)
2069 || get_user_ual(target_addr, vptr + n)
2070 || get_user_ual(addrlen, vptr + 2 * n))
2071 return -TARGET_EFAULT;
2072
2073 ret = do_connect(sockfd, target_addr, addrlen);
2074 }
2075 break;
2076 case SOCKOP_listen:
2077 {
2078 abi_ulong sockfd, backlog;
2079
2080 if (get_user_ual(sockfd, vptr)
2081 || get_user_ual(backlog, vptr + n))
2082 return -TARGET_EFAULT;
2083
2084 ret = get_errno(listen(sockfd, backlog));
2085 }
2086 break;
2087 case SOCKOP_accept:
2088 {
2089 abi_ulong sockfd;
2090 abi_ulong target_addr, target_addrlen;
2091
2092 if (get_user_ual(sockfd, vptr)
2093 || get_user_ual(target_addr, vptr + n)
2094 || get_user_ual(target_addrlen, vptr + 2 * n))
2095 return -TARGET_EFAULT;
2096
2097 ret = do_accept(sockfd, target_addr, target_addrlen);
2098 }
2099 break;
2100 case SOCKOP_getsockname:
2101 {
2102 abi_ulong sockfd;
2103 abi_ulong target_addr, target_addrlen;
2104
2105 if (get_user_ual(sockfd, vptr)
2106 || get_user_ual(target_addr, vptr + n)
2107 || get_user_ual(target_addrlen, vptr + 2 * n))
2108 return -TARGET_EFAULT;
2109
2110 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2111 }
2112 break;
2113 case SOCKOP_getpeername:
2114 {
2115 abi_ulong sockfd;
2116 abi_ulong target_addr, target_addrlen;
2117
2118 if (get_user_ual(sockfd, vptr)
2119 || get_user_ual(target_addr, vptr + n)
2120 || get_user_ual(target_addrlen, vptr + 2 * n))
2121 return -TARGET_EFAULT;
2122
2123 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2124 }
2125 break;
2126 case SOCKOP_socketpair:
2127 {
2128 abi_ulong domain, type, protocol;
2129 abi_ulong tab;
2130
2131 if (get_user_ual(domain, vptr)
2132 || get_user_ual(type, vptr + n)
2133 || get_user_ual(protocol, vptr + 2 * n)
2134 || get_user_ual(tab, vptr + 3 * n))
2135 return -TARGET_EFAULT;
2136
2137 ret = do_socketpair(domain, type, protocol, tab);
2138 }
2139 break;
2140 case SOCKOP_send:
2141 {
2142 abi_ulong sockfd;
2143 abi_ulong msg;
2144 size_t len;
2145 abi_ulong flags;
2146
2147 if (get_user_ual(sockfd, vptr)
2148 || get_user_ual(msg, vptr + n)
2149 || get_user_ual(len, vptr + 2 * n)
2150 || get_user_ual(flags, vptr + 3 * n))
2151 return -TARGET_EFAULT;
2152
2153 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2154 }
2155 break;
2156 case SOCKOP_recv:
2157 {
2158 abi_ulong sockfd;
2159 abi_ulong msg;
2160 size_t len;
2161 abi_ulong flags;
2162
2163 if (get_user_ual(sockfd, vptr)
2164 || get_user_ual(msg, vptr + n)
2165 || get_user_ual(len, vptr + 2 * n)
2166 || get_user_ual(flags, vptr + 3 * n))
2167 return -TARGET_EFAULT;
2168
2169 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2170 }
2171 break;
2172 case SOCKOP_sendto:
2173 {
2174 abi_ulong sockfd;
2175 abi_ulong msg;
2176 size_t len;
2177 abi_ulong flags;
2178 abi_ulong addr;
2179 socklen_t addrlen;
2180
2181 if (get_user_ual(sockfd, vptr)
2182 || get_user_ual(msg, vptr + n)
2183 || get_user_ual(len, vptr + 2 * n)
2184 || get_user_ual(flags, vptr + 3 * n)
2185 || get_user_ual(addr, vptr + 4 * n)
2186 || get_user_ual(addrlen, vptr + 5 * n))
2187 return -TARGET_EFAULT;
2188
2189 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2190 }
2191 break;
2192 case SOCKOP_recvfrom:
2193 {
2194 abi_ulong sockfd;
2195 abi_ulong msg;
2196 size_t len;
2197 abi_ulong flags;
2198 abi_ulong addr;
2199 socklen_t addrlen;
2200
2201 if (get_user_ual(sockfd, vptr)
2202 || get_user_ual(msg, vptr + n)
2203 || get_user_ual(len, vptr + 2 * n)
2204 || get_user_ual(flags, vptr + 3 * n)
2205 || get_user_ual(addr, vptr + 4 * n)
2206 || get_user_ual(addrlen, vptr + 5 * n))
2207 return -TARGET_EFAULT;
2208
2209 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2210 }
2211 break;
2212 case SOCKOP_shutdown:
2213 {
2214 abi_ulong sockfd, how;
2215
2216 if (get_user_ual(sockfd, vptr)
2217 || get_user_ual(how, vptr + n))
2218 return -TARGET_EFAULT;
2219
2220 ret = get_errno(shutdown(sockfd, how));
2221 }
2222 break;
2223 case SOCKOP_sendmsg:
2224 case SOCKOP_recvmsg:
2225 {
2226 abi_ulong fd;
2227 abi_ulong target_msg;
2228 abi_ulong flags;
2229
2230 if (get_user_ual(fd, vptr)
2231 || get_user_ual(target_msg, vptr + n)
2232 || get_user_ual(flags, vptr + 2 * n))
2233 return -TARGET_EFAULT;
2234
2235 ret = do_sendrecvmsg(fd, target_msg, flags,
2236 (num == SOCKOP_sendmsg));
2237 }
2238 break;
2239 case SOCKOP_setsockopt:
2240 {
2241 abi_ulong sockfd;
2242 abi_ulong level;
2243 abi_ulong optname;
2244 abi_ulong optval;
2245 socklen_t optlen;
2246
2247 if (get_user_ual(sockfd, vptr)
2248 || get_user_ual(level, vptr + n)
2249 || get_user_ual(optname, vptr + 2 * n)
2250 || get_user_ual(optval, vptr + 3 * n)
2251 || get_user_ual(optlen, vptr + 4 * n))
2252 return -TARGET_EFAULT;
2253
2254 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2255 }
2256 break;
2257 case SOCKOP_getsockopt:
2258 {
2259 abi_ulong sockfd;
2260 abi_ulong level;
2261 abi_ulong optname;
2262 abi_ulong optval;
2263 socklen_t optlen;
2264
2265 if (get_user_ual(sockfd, vptr)
2266 || get_user_ual(level, vptr + n)
2267 || get_user_ual(optname, vptr + 2 * n)
2268 || get_user_ual(optval, vptr + 3 * n)
2269 || get_user_ual(optlen, vptr + 4 * n))
2270 return -TARGET_EFAULT;
2271
2272 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2273 }
2274 break;
2275 default:
2276 gemu_log("Unsupported socketcall: %d\n", num);
2277 ret = -TARGET_ENOSYS;
2278 break;
2279 }
2280 return ret;
2281 }
2282 #endif
2283
2284 #define N_SHM_REGIONS 32
2285
2286 static struct shm_region {
2287 abi_ulong start;
2288 abi_ulong size;
2289 } shm_regions[N_SHM_REGIONS];
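/*
 * shm_regions[] records the guest address and size of every SysV shared
 * memory segment attached via do_shmat(), so that do_shmdt() can clear the
 * page flags for the correct range later.  A start address of 0 marks a
 * free slot.
 */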
2290
2291 struct target_ipc_perm
2292 {
2293 abi_long __key;
2294 abi_ulong uid;
2295 abi_ulong gid;
2296 abi_ulong cuid;
2297 abi_ulong cgid;
2298 unsigned short int mode;
2299 unsigned short int __pad1;
2300 unsigned short int __seq;
2301 unsigned short int __pad2;
2302 abi_ulong __unused1;
2303 abi_ulong __unused2;
2304 };
2305
2306 struct target_semid_ds
2307 {
2308 struct target_ipc_perm sem_perm;
2309 abi_ulong sem_otime;
2310 abi_ulong __unused1;
2311 abi_ulong sem_ctime;
2312 abi_ulong __unused2;
2313 abi_ulong sem_nsems;
2314 abi_ulong __unused3;
2315 abi_ulong __unused4;
2316 };
2317
2318 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2319 abi_ulong target_addr)
2320 {
2321 struct target_ipc_perm *target_ip;
2322 struct target_semid_ds *target_sd;
2323
2324 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2325 return -TARGET_EFAULT;
2326 target_ip = &(target_sd->sem_perm);
2327 host_ip->__key = tswapal(target_ip->__key);
2328 host_ip->uid = tswapal(target_ip->uid);
2329 host_ip->gid = tswapal(target_ip->gid);
2330 host_ip->cuid = tswapal(target_ip->cuid);
2331 host_ip->cgid = tswapal(target_ip->cgid);
2332 host_ip->mode = tswap16(target_ip->mode);
2333 unlock_user_struct(target_sd, target_addr, 0);
2334 return 0;
2335 }
2336
2337 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2338 struct ipc_perm *host_ip)
2339 {
2340 struct target_ipc_perm *target_ip;
2341 struct target_semid_ds *target_sd;
2342
2343 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2344 return -TARGET_EFAULT;
2345 target_ip = &(target_sd->sem_perm);
2346 target_ip->__key = tswapal(host_ip->__key);
2347 target_ip->uid = tswapal(host_ip->uid);
2348 target_ip->gid = tswapal(host_ip->gid);
2349 target_ip->cuid = tswapal(host_ip->cuid);
2350 target_ip->cgid = tswapal(host_ip->cgid);
2351 target_ip->mode = tswap16(host_ip->mode);
2352 unlock_user_struct(target_sd, target_addr, 1);
2353 return 0;
2354 }
2355
2356 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2357 abi_ulong target_addr)
2358 {
2359 struct target_semid_ds *target_sd;
2360
2361 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2362 return -TARGET_EFAULT;
2363 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2364 return -TARGET_EFAULT;
2365 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2366 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2367 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2368 unlock_user_struct(target_sd, target_addr, 0);
2369 return 0;
2370 }
2371
2372 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2373 struct semid_ds *host_sd)
2374 {
2375 struct target_semid_ds *target_sd;
2376
2377 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2378 return -TARGET_EFAULT;
2379 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2380 return -TARGET_EFAULT;
2381 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2382 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2383 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2384 unlock_user_struct(target_sd, target_addr, 1);
2385 return 0;
2386 }
2387
2388 struct target_seminfo {
2389 int semmap;
2390 int semmni;
2391 int semmns;
2392 int semmnu;
2393 int semmsl;
2394 int semopm;
2395 int semume;
2396 int semusz;
2397 int semvmx;
2398 int semaem;
2399 };
2400
2401 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2402 struct seminfo *host_seminfo)
2403 {
2404 struct target_seminfo *target_seminfo;
2405 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2406 return -TARGET_EFAULT;
2407 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2408 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2409 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2410 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2411 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2412 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2413 __put_user(host_seminfo->semume, &target_seminfo->semume);
2414 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2415 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2416 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2417 unlock_user_struct(target_seminfo, target_addr, 1);
2418 return 0;
2419 }
2420
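/*
 * POSIX leaves union semun for the caller to define (glibc does not provide
 * it), so declare both the host-side union and its target counterpart here.
 */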
2421 union semun {
2422 int val;
2423 struct semid_ds *buf;
2424 unsigned short *array;
2425 struct seminfo *__buf;
2426 };
2427
2428 union target_semun {
2429 int val;
2430 abi_ulong buf;
2431 abi_ulong array;
2432 abi_ulong __buf;
2433 };
2434
2435 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2436 abi_ulong target_addr)
2437 {
2438 int nsems;
2439 unsigned short *array;
2440 union semun semun;
2441 struct semid_ds semid_ds;
2442 int i, ret;
2443
2444 semun.buf = &semid_ds;
2445
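    /*
     * Query the set first: IPC_STAT fills in sem_nsems, which tells us how
     * many 16-bit semaphore values need to be copied.
     */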
2446 ret = semctl(semid, 0, IPC_STAT, semun);
2447 if (ret == -1)
2448 return get_errno(ret);
2449
2450 nsems = semid_ds.sem_nsems;
2451
2452 *host_array = malloc(nsems*sizeof(unsigned short));
2453 array = lock_user(VERIFY_READ, target_addr,
2454 nsems*sizeof(unsigned short), 1);
2455 if (!array)
2456 return -TARGET_EFAULT;
2457
2458 for(i=0; i<nsems; i++) {
2459 __get_user((*host_array)[i], &array[i]);
2460 }
2461 unlock_user(array, target_addr, 0);
2462
2463 return 0;
2464 }
2465
2466 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2467 unsigned short **host_array)
2468 {
2469 int nsems;
2470 unsigned short *array;
2471 union semun semun;
2472 struct semid_ds semid_ds;
2473 int i, ret;
2474
2475 semun.buf = &semid_ds;
2476
2477 ret = semctl(semid, 0, IPC_STAT, semun);
2478 if (ret == -1)
2479 return get_errno(ret);
2480
2481 nsems = semid_ds.sem_nsems;
2482
2483 array = lock_user(VERIFY_WRITE, target_addr,
2484 nsems*sizeof(unsigned short), 0);
2485 if (!array)
2486 return -TARGET_EFAULT;
2487
2488 for(i=0; i<nsems; i++) {
2489 __put_user((*host_array)[i], &array[i]);
2490 }
2491 free(*host_array);
2492 unlock_user(array, target_addr, 1);
2493
2494 return 0;
2495 }
2496
2497 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2498 union target_semun target_su)
2499 {
2500 union semun arg;
2501 struct semid_ds dsarg;
2502 unsigned short *array = NULL;
2503 struct seminfo seminfo;
2504 abi_long ret = -TARGET_EINVAL;
2505 abi_long err;
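    /*
     * Strip modifier flags that the guest may OR into the command word
     * (e.g. IPC_64, 0x100); the host semctl() expects the bare command.
     */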
2506 cmd &= 0xff;
2507
2508 switch( cmd ) {
2509 case GETVAL:
2510 case SETVAL:
2511 arg.val = tswap32(target_su.val);
2512 ret = get_errno(semctl(semid, semnum, cmd, arg));
2513 target_su.val = tswap32(arg.val);
2514 break;
2515 case GETALL:
2516 case SETALL:
2517 err = target_to_host_semarray(semid, &array, target_su.array);
2518 if (err)
2519 return err;
2520 arg.array = array;
2521 ret = get_errno(semctl(semid, semnum, cmd, arg));
2522 err = host_to_target_semarray(semid, target_su.array, &array);
2523 if (err)
2524 return err;
2525 break;
2526 case IPC_STAT:
2527 case IPC_SET:
2528 case SEM_STAT:
2529 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2530 if (err)
2531 return err;
2532 arg.buf = &dsarg;
2533 ret = get_errno(semctl(semid, semnum, cmd, arg));
2534 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2535 if (err)
2536 return err;
2537 break;
2538 case IPC_INFO:
2539 case SEM_INFO:
2540 arg.__buf = &seminfo;
2541 ret = get_errno(semctl(semid, semnum, cmd, arg));
2542 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2543 if (err)
2544 return err;
2545 break;
2546 case IPC_RMID:
2547 case GETPID:
2548 case GETNCNT:
2549 case GETZCNT:
2550 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2551 break;
2552 }
2553
2554 return ret;
2555 }
2556
2557 struct target_sembuf {
2558 unsigned short sem_num;
2559 short sem_op;
2560 short sem_flg;
2561 };
2562
2563 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2564 abi_ulong target_addr,
2565 unsigned nsops)
2566 {
2567 struct target_sembuf *target_sembuf;
2568 int i;
2569
2570 target_sembuf = lock_user(VERIFY_READ, target_addr,
2571 nsops*sizeof(struct target_sembuf), 1);
2572 if (!target_sembuf)
2573 return -TARGET_EFAULT;
2574
2575 for(i=0; i<nsops; i++) {
2576 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2577 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2578 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2579 }
2580
2581 unlock_user(target_sembuf, target_addr, 0);
2582
2583 return 0;
2584 }
2585
2586 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2587 {
2588 struct sembuf sops[nsops];
2589
2590 if (target_to_host_sembuf(sops, ptr, nsops))
2591 return -TARGET_EFAULT;
2592
2593     return get_errno(semop(semid, sops, nsops));
2594 }
2595
2596 struct target_msqid_ds
2597 {
2598 struct target_ipc_perm msg_perm;
2599 abi_ulong msg_stime;
2600 #if TARGET_ABI_BITS == 32
2601 abi_ulong __unused1;
2602 #endif
2603 abi_ulong msg_rtime;
2604 #if TARGET_ABI_BITS == 32
2605 abi_ulong __unused2;
2606 #endif
2607 abi_ulong msg_ctime;
2608 #if TARGET_ABI_BITS == 32
2609 abi_ulong __unused3;
2610 #endif
2611 abi_ulong __msg_cbytes;
2612 abi_ulong msg_qnum;
2613 abi_ulong msg_qbytes;
2614 abi_ulong msg_lspid;
2615 abi_ulong msg_lrpid;
2616 abi_ulong __unused4;
2617 abi_ulong __unused5;
2618 };
2619
2620 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2621 abi_ulong target_addr)
2622 {
2623 struct target_msqid_ds *target_md;
2624
2625 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2626 return -TARGET_EFAULT;
2627 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2628 return -TARGET_EFAULT;
2629 host_md->msg_stime = tswapal(target_md->msg_stime);
2630 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2631 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2632 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2633 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2634 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2635 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2636 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2637 unlock_user_struct(target_md, target_addr, 0);
2638 return 0;
2639 }
2640
2641 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2642 struct msqid_ds *host_md)
2643 {
2644 struct target_msqid_ds *target_md;
2645
2646 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2647 return -TARGET_EFAULT;
2648 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2649 return -TARGET_EFAULT;
2650 target_md->msg_stime = tswapal(host_md->msg_stime);
2651 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2652 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2653 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2654 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2655 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2656 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2657 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2658 unlock_user_struct(target_md, target_addr, 1);
2659 return 0;
2660 }
2661
2662 struct target_msginfo {
2663 int msgpool;
2664 int msgmap;
2665 int msgmax;
2666 int msgmnb;
2667 int msgmni;
2668 int msgssz;
2669 int msgtql;
2670 unsigned short int msgseg;
2671 };
2672
2673 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2674 struct msginfo *host_msginfo)
2675 {
2676 struct target_msginfo *target_msginfo;
2677 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2678 return -TARGET_EFAULT;
2679 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2680 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2681 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2682 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2683 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2684 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2685 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2686 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2687 unlock_user_struct(target_msginfo, target_addr, 1);
2688 return 0;
2689 }
2690
2691 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2692 {
2693 struct msqid_ds dsarg;
2694 struct msginfo msginfo;
2695 abi_long ret = -TARGET_EINVAL;
2696
2697 cmd &= 0xff;
2698
2699 switch (cmd) {
2700 case IPC_STAT:
2701 case IPC_SET:
2702 case MSG_STAT:
2703 if (target_to_host_msqid_ds(&dsarg,ptr))
2704 return -TARGET_EFAULT;
2705 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2706 if (host_to_target_msqid_ds(ptr,&dsarg))
2707 return -TARGET_EFAULT;
2708 break;
2709 case IPC_RMID:
2710 ret = get_errno(msgctl(msgid, cmd, NULL));
2711 break;
2712 case IPC_INFO:
2713 case MSG_INFO:
2714 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2715 if (host_to_target_msginfo(ptr, &msginfo))
2716 return -TARGET_EFAULT;
2717 break;
2718 }
2719
2720 return ret;
2721 }
2722
2723 struct target_msgbuf {
2724 abi_long mtype;
2725 char mtext[1];
2726 };
2727
2728 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2729 unsigned int msgsz, int msgflg)
2730 {
2731 struct target_msgbuf *target_mb;
2732 struct msgbuf *host_mb;
2733 abi_long ret = 0;
2734
2735 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2736 return -TARGET_EFAULT;
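    /*
     * A host msgbuf starts with a native long mtype followed by the payload,
     * hence the sizeof(long) + msgsz allocation below.
     */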
2737 host_mb = malloc(msgsz+sizeof(long));
2738 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2739 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2740 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2741 free(host_mb);
2742 unlock_user_struct(target_mb, msgp, 0);
2743
2744 return ret;
2745 }
2746
2747 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2748 unsigned int msgsz, abi_long msgtyp,
2749 int msgflg)
2750 {
2751 struct target_msgbuf *target_mb;
2752 char *target_mtext;
2753 struct msgbuf *host_mb;
2754 abi_long ret = 0;
2755
2756 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2757 return -TARGET_EFAULT;
2758
2759 host_mb = malloc(msgsz+sizeof(long));
2760 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
2761
2762 if (ret > 0) {
2763 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2764 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2765 if (!target_mtext) {
2766 ret = -TARGET_EFAULT;
2767 goto end;
2768 }
2769 memcpy(target_mb->mtext, host_mb->mtext, ret);
2770 unlock_user(target_mtext, target_mtext_addr, ret);
2771 }
2772
2773 target_mb->mtype = tswapal(host_mb->mtype);
2774 free(host_mb);
2775
2776 end:
2777 if (target_mb)
2778 unlock_user_struct(target_mb, msgp, 1);
2779 return ret;
2780 }
2781
2782 struct target_shmid_ds
2783 {
2784 struct target_ipc_perm shm_perm;
2785 abi_ulong shm_segsz;
2786 abi_ulong shm_atime;
2787 #if TARGET_ABI_BITS == 32
2788 abi_ulong __unused1;
2789 #endif
2790 abi_ulong shm_dtime;
2791 #if TARGET_ABI_BITS == 32
2792 abi_ulong __unused2;
2793 #endif
2794 abi_ulong shm_ctime;
2795 #if TARGET_ABI_BITS == 32
2796 abi_ulong __unused3;
2797 #endif
2798 int shm_cpid;
2799 int shm_lpid;
2800 abi_ulong shm_nattch;
2801 unsigned long int __unused4;
2802 unsigned long int __unused5;
2803 };
2804
2805 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2806 abi_ulong target_addr)
2807 {
2808 struct target_shmid_ds *target_sd;
2809
2810 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2811 return -TARGET_EFAULT;
2812 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2813 return -TARGET_EFAULT;
2814 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2815 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2816 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2817 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2818 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2819 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2820 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2821 unlock_user_struct(target_sd, target_addr, 0);
2822 return 0;
2823 }
2824
2825 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2826 struct shmid_ds *host_sd)
2827 {
2828 struct target_shmid_ds *target_sd;
2829
2830 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2831 return -TARGET_EFAULT;
2832 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2833 return -TARGET_EFAULT;
2834 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2835 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2836 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2837 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2838 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2839 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2840 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2841 unlock_user_struct(target_sd, target_addr, 1);
2842 return 0;
2843 }
2844
2845 struct target_shminfo {
2846 abi_ulong shmmax;
2847 abi_ulong shmmin;
2848 abi_ulong shmmni;
2849 abi_ulong shmseg;
2850 abi_ulong shmall;
2851 };
2852
2853 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2854 struct shminfo *host_shminfo)
2855 {
2856 struct target_shminfo *target_shminfo;
2857 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2858 return -TARGET_EFAULT;
2859 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2860 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2861 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2862 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2863 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2864 unlock_user_struct(target_shminfo, target_addr, 1);
2865 return 0;
2866 }
2867
2868 struct target_shm_info {
2869 int used_ids;
2870 abi_ulong shm_tot;
2871 abi_ulong shm_rss;
2872 abi_ulong shm_swp;
2873 abi_ulong swap_attempts;
2874 abi_ulong swap_successes;
2875 };
2876
2877 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2878 struct shm_info *host_shm_info)
2879 {
2880 struct target_shm_info *target_shm_info;
2881 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2882 return -TARGET_EFAULT;
2883 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2884 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2885 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2886 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2887 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2888 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2889 unlock_user_struct(target_shm_info, target_addr, 1);
2890 return 0;
2891 }
2892
2893 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2894 {
2895 struct shmid_ds dsarg;
2896 struct shminfo shminfo;
2897 struct shm_info shm_info;
2898 abi_long ret = -TARGET_EINVAL;
2899
2900 cmd &= 0xff;
2901
2902 switch(cmd) {
2903 case IPC_STAT:
2904 case IPC_SET:
2905 case SHM_STAT:
2906 if (target_to_host_shmid_ds(&dsarg, buf))
2907 return -TARGET_EFAULT;
2908 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2909 if (host_to_target_shmid_ds(buf, &dsarg))
2910 return -TARGET_EFAULT;
2911 break;
2912 case IPC_INFO:
2913 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2914 if (host_to_target_shminfo(buf, &shminfo))
2915 return -TARGET_EFAULT;
2916 break;
2917 case SHM_INFO:
2918 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2919 if (host_to_target_shm_info(buf, &shm_info))
2920 return -TARGET_EFAULT;
2921 break;
2922 case IPC_RMID:
2923 case SHM_LOCK:
2924 case SHM_UNLOCK:
2925 ret = get_errno(shmctl(shmid, cmd, NULL));
2926 break;
2927 }
2928
2929 return ret;
2930 }
2931
2932 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2933 {
2934 abi_long raddr;
2935 void *host_raddr;
2936 struct shmid_ds shm_info;
2937 int i,ret;
2938
2939 /* find out the length of the shared memory segment */
2940 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2941 if (is_error(ret)) {
2942 /* can't get length, bail out */
2943 return ret;
2944 }
2945
2946 mmap_lock();
2947
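    /*
     * If the guest supplied an address, attach there directly; otherwise pick
     * a free range in the guest address space ourselves and pass SHM_REMAP so
     * shmat() may replace whatever host mapping already covers that range
     * (e.g. the reserved guest address space).
     */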
2948 if (shmaddr)
2949 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2950 else {
2951 abi_ulong mmap_start;
2952
2953 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2954
2955 if (mmap_start == -1) {
2956 errno = ENOMEM;
2957 host_raddr = (void *)-1;
2958 } else
2959 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2960 }
2961
2962 if (host_raddr == (void *)-1) {
2963 mmap_unlock();
2964 return get_errno((long)host_raddr);
2965 }
2966 raddr=h2g((unsigned long)host_raddr);
2967
2968 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2969 PAGE_VALID | PAGE_READ |
2970 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2971
2972 for (i = 0; i < N_SHM_REGIONS; i++) {
2973 if (shm_regions[i].start == 0) {
2974 shm_regions[i].start = raddr;
2975 shm_regions[i].size = shm_info.shm_segsz;
2976 break;
2977 }
2978 }
2979
2980 mmap_unlock();
2981 return raddr;
2982
2983 }
2984
2985 static inline abi_long do_shmdt(abi_ulong shmaddr)
2986 {
2987 int i;
2988
2989 for (i = 0; i < N_SHM_REGIONS; ++i) {
2990 if (shm_regions[i].start == shmaddr) {
2991 shm_regions[i].start = 0;
2992 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
2993 break;
2994 }
2995 }
2996
2997 return get_errno(shmdt(g2h(shmaddr)));
2998 }
2999
3000 #ifdef TARGET_NR_ipc
3001 /* ??? This only works with linear mappings. */
3002 /* do_ipc() must return target values and target errnos. */
3003 static abi_long do_ipc(unsigned int call, int first,
3004 int second, int third,
3005 abi_long ptr, abi_long fifth)
3006 {
3007 int version;
3008 abi_long ret = 0;
3009
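    /*
     * The ipc(2) multiplexer encodes an interface version in the upper 16
     * bits of 'call' (cf. the kernel's IPCCALL() macro); the low 16 bits
     * select the actual operation.
     */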
3010 version = call >> 16;
3011 call &= 0xffff;
3012
3013 switch (call) {
3014 case IPCOP_semop:
3015 ret = do_semop(first, ptr, second);
3016 break;
3017
3018 case IPCOP_semget:
3019 ret = get_errno(semget(first, second, third));
3020 break;
3021
3022 case IPCOP_semctl:
3023 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3024 break;
3025
3026 case IPCOP_msgget:
3027 ret = get_errno(msgget(first, second));
3028 break;
3029
3030 case IPCOP_msgsnd:
3031 ret = do_msgsnd(first, ptr, second, third);
3032 break;
3033
3034 case IPCOP_msgctl:
3035 ret = do_msgctl(first, second, ptr);
3036 break;
3037
3038 case IPCOP_msgrcv:
3039 switch (version) {
3040 case 0:
3041 {
3042 struct target_ipc_kludge {
3043 abi_long msgp;
3044 abi_long msgtyp;
3045 } *tmp;
3046
3047 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3048 ret = -TARGET_EFAULT;
3049 break;
3050 }
3051
3052 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3053
3054 unlock_user_struct(tmp, ptr, 0);
3055 break;
3056 }
3057 default:
3058 ret = do_msgrcv(first, ptr, second, fifth, third);
3059 }
3060 break;
3061
3062 case IPCOP_shmat:
3063 switch (version) {
3064 default:
3065 {
3066 abi_ulong raddr;
3067 raddr = do_shmat(first, ptr, second);
3068 if (is_error(raddr))
3069 return get_errno(raddr);
3070 if (put_user_ual(raddr, third))
3071 return -TARGET_EFAULT;
3072 break;
3073 }
3074 case 1:
3075 ret = -TARGET_EINVAL;
3076 break;
3077 }
3078 break;
3079 case IPCOP_shmdt:
3080 ret = do_shmdt(ptr);
3081 break;
3082
3083 case IPCOP_shmget:
3084         /* IPC_* flag values are the same on all Linux platforms */
3085 ret = get_errno(shmget(first, second, third));
3086 break;
3087
3088         /* IPC_* and SHM_* command values are the same on all Linux platforms */
3089 case IPCOP_shmctl:
3090 ret = do_shmctl(first, second, third);
3091 break;
3092 default:
3093 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3094 ret = -TARGET_ENOSYS;
3095 break;
3096 }
3097 return ret;
3098 }
3099 #endif
3100
3101 /* kernel structure types definitions */
3102
3103 #define STRUCT(name, ...) STRUCT_ ## name,
3104 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3105 enum {
3106 #include "syscall_types.h"
3107 };
3108 #undef STRUCT
3109 #undef STRUCT_SPECIAL
3110
3111 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3112 #define STRUCT_SPECIAL(name)
3113 #include "syscall_types.h"
3114 #undef STRUCT
3115 #undef STRUCT_SPECIAL
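/*
 * syscall_types.h is expanded twice, X-macro style: the first pass above
 * generates one STRUCT_<name> enum constant per structure, the second
 * generates the struct_<name>_def argtype descriptors that the thunk code
 * uses to convert ioctl arguments between target and host layouts.
 */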
3116
3117 typedef struct IOCTLEntry IOCTLEntry;
3118
3119 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3120 int fd, abi_long cmd, abi_long arg);
3121
3122 struct IOCTLEntry {
3123 unsigned int target_cmd;
3124 unsigned int host_cmd;
3125 const char *name;
3126 int access;
3127 do_ioctl_fn *do_ioctl;
3128 const argtype arg_type[5];
3129 };
3130
3131 #define IOC_R 0x0001
3132 #define IOC_W 0x0002
3133 #define IOC_RW (IOC_R | IOC_W)
3134
3135 #define MAX_STRUCT_SIZE 4096
3136
3137 #ifdef CONFIG_FIEMAP
3138 /* Cap the extent count so that the fiemap access checks can't overflow
3139  * on 32-bit systems. This is very slightly smaller than the limit
3140  * imposed by the underlying kernel.
3141  */
3142 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3143 / sizeof(struct fiemap_extent))
3144
3145 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3146 int fd, abi_long cmd, abi_long arg)
3147 {
3148 /* The parameter for this ioctl is a struct fiemap followed
3149 * by an array of struct fiemap_extent whose size is set
3150 * in fiemap->fm_extent_count. The array is filled in by the
3151 * ioctl.
3152 */
3153 int target_size_in, target_size_out;
3154 struct fiemap *fm;
3155 const argtype *arg_type = ie->arg_type;
3156 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3157 void *argptr, *p;
3158 abi_long ret;
3159 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3160 uint32_t outbufsz;
3161 int free_fm = 0;
3162
3163 assert(arg_type[0] == TYPE_PTR);
3164 assert(ie->access == IOC_RW);
3165 arg_type++;
3166 target_size_in = thunk_type_size(arg_type, 0);
3167 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3168 if (!argptr) {
3169 return -TARGET_EFAULT;
3170 }
3171 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3172 unlock_user(argptr, arg, 0);
3173 fm = (struct fiemap *)buf_temp;
3174 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3175 return -TARGET_EINVAL;
3176 }
3177
3178 outbufsz = sizeof (*fm) +
3179 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3180
3181 if (outbufsz > MAX_STRUCT_SIZE) {
3182 /* We can't fit all the extents into the fixed size buffer.
3183 * Allocate one that is large enough and use it instead.
3184 */
3185 fm = malloc(outbufsz);
3186 if (!fm) {
3187 return -TARGET_ENOMEM;
3188 }
3189 memcpy(fm, buf_temp, sizeof(struct fiemap));
3190 free_fm = 1;
3191 }
3192 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3193 if (!is_error(ret)) {
3194 target_size_out = target_size_in;
3195 /* An extent_count of 0 means we were only counting the extents
3196 * so there are no structs to copy
3197 */
3198 if (fm->fm_extent_count != 0) {
3199 target_size_out += fm->fm_mapped_extents * extent_size;
3200 }
3201 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3202 if (!argptr) {
3203 ret = -TARGET_EFAULT;
3204 } else {
3205 /* Convert the struct fiemap */
3206 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3207 if (fm->fm_extent_count != 0) {
3208 p = argptr + target_size_in;
3209 /* ...and then all the struct fiemap_extents */
3210 for (i = 0; i < fm->fm_mapped_extents; i++) {
3211 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3212 THUNK_TARGET);
3213 p += extent_size;
3214 }
3215 }
3216 unlock_user(argptr, arg, target_size_out);
3217 }
3218 }
3219 if (free_fm) {
3220 free(fm);
3221 }
3222 return ret;
3223 }
3224 #endif
3225
3226 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3227 int fd, abi_long cmd, abi_long arg)
3228 {
3229 const argtype *arg_type = ie->arg_type;
3230 int target_size;
3231 void *argptr;
3232 int ret;
3233 struct ifconf *host_ifconf;
3234 uint32_t outbufsz;
3235 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3236 int target_ifreq_size;
3237 int nb_ifreq;
3238 int free_buf = 0;
3239 int i;
3240 int target_ifc_len;
3241 abi_long target_ifc_buf;
3242 int host_ifc_len;
3243 char *host_ifc_buf;
3244
3245 assert(arg_type[0] == TYPE_PTR);
3246 assert(ie->access == IOC_RW);
3247
3248 arg_type++;
3249 target_size = thunk_type_size(arg_type, 0);
3250
3251 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3252 if (!argptr)
3253 return -TARGET_EFAULT;
3254 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3255 unlock_user(argptr, arg, 0);
3256
3257 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3258 target_ifc_len = host_ifconf->ifc_len;
3259 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3260
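    /*
     * The target's struct ifreq may differ in size from the host's, so derive
     * the entry count from the target length and size the host buffer in host
     * ifreq units.
     */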
3261 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3262 nb_ifreq = target_ifc_len / target_ifreq_size;
3263 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3264
3265 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3266 if (outbufsz > MAX_STRUCT_SIZE) {
3267         /* We can't fit all the ifreq records into the fixed-size buffer.
3268          * Allocate one that is large enough and use it instead.
3269          */
3270 host_ifconf = malloc(outbufsz);
3271 if (!host_ifconf) {
3272 return -TARGET_ENOMEM;
3273 }
3274 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3275 free_buf = 1;
3276 }
3277 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3278
3279 host_ifconf->ifc_len = host_ifc_len;
3280 host_ifconf->ifc_buf = host_ifc_buf;
3281
3282 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3283 if (!is_error(ret)) {
3284 /* convert host ifc_len to target ifc_len */
3285
3286 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3287 target_ifc_len = nb_ifreq * target_ifreq_size;
3288 host_ifconf->ifc_len = target_ifc_len;
3289
3290 /* restore target ifc_buf */
3291
3292 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3293
3294 /* copy struct ifconf to target user */
3295
3296 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3297 if (!argptr)
3298 return -TARGET_EFAULT;
3299 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3300 unlock_user(argptr, arg, target_size);
3301
3302 /* copy ifreq[] to target user */
3303
3304 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3305 for (i = 0; i < nb_ifreq ; i++) {
3306 thunk_convert(argptr + i * target_ifreq_size,
3307 host_ifc_buf + i * sizeof(struct ifreq),
3308 ifreq_arg_type, THUNK_TARGET);
3309 }
3310 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3311 }
3312
3313 if (free_buf) {
3314 free(host_ifconf);
3315 }
3316
3317 return ret;
3318 }
3319
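/*
 * ioctls.h expands to one table entry per supported ioctl: the target and
 * host command numbers, a name for logging, the access direction, an optional
 * custom handler (IOCTL_SPECIAL), and a thunk description of the argument.
 * do_ioctl() scans the table linearly up to the 0 terminator.
 */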
3320 static IOCTLEntry ioctl_entries[] = {
3321 #define IOCTL(cmd, access, ...) \
3322 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3323 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3324 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3325 #include "ioctls.h"
3326 { 0, 0, },
3327 };
3328
3329 /* ??? Implement proper locking for ioctls. */
3330 /* do_ioctl() must return target values and target errnos. */
3331 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3332 {
3333 const IOCTLEntry *ie;
3334 const argtype *arg_type;
3335 abi_long ret;
3336 uint8_t buf_temp[MAX_STRUCT_SIZE];
3337 int target_size;
3338 void *argptr;
3339
3340 ie = ioctl_entries;
3341 for(;;) {
3342 if (ie->target_cmd == 0) {
3343 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3344 return -TARGET_ENOSYS;
3345 }
3346 if (ie->target_cmd == cmd)
3347 break;
3348 ie++;
3349 }
3350 arg_type = ie->arg_type;
3351 #if defined(DEBUG)
3352 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3353 #endif
3354 if (ie->do_ioctl) {
3355 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3356 }
3357
3358 switch(arg_type[0]) {
3359 case TYPE_NULL:
3360 /* no argument */
3361 ret = get_errno(ioctl(fd, ie->host_cmd));
3362 break;
3363 case TYPE_PTRVOID:
3364 case TYPE_INT:
3365         /* int argument */
3366 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3367 break;
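    /*
     * Pointer argument: ie->access gives the data direction.  IOC_W copies
     * the target struct into a host buffer before the ioctl, IOC_R copies the
     * host result back to the target afterwards, and IOC_RW does both.
     */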
3368 case TYPE_PTR:
3369 arg_type++;
3370 target_size = thunk_type_size(arg_type, 0);
3371 switch(ie->access) {
3372 case IOC_R:
3373 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3374 if (!is_error(ret)) {
3375 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3376 if (!argptr)
3377 return -TARGET_EFAULT;
3378 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3379 unlock_user(argptr, arg, target_size);
3380 }
3381 break;
3382 case IOC_W:
3383 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3384 if (!argptr)
3385 return -TARGET_EFAULT;
3386 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3387 unlock_user(argptr, arg, 0);
3388 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3389 break;
3390 default:
3391 case IOC_RW:
3392 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3393 if (!argptr)
3394 return -TARGET_EFAULT;
3395 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3396 unlock_user(argptr, arg, 0);
3397 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3398 if (!is_error(ret)) {
3399 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3400 if (!argptr)
3401 return -TARGET_EFAULT;
3402 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3403 unlock_user(argptr, arg, target_size);
3404 }
3405 break;
3406 }
3407 break;
3408 default:
3409 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3410 (long)cmd, arg_type[0]);
3411 ret = -TARGET_ENOSYS;
3412 break;
3413 }
3414 return ret;
3415 }
3416
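/*
 * Each bitmask_transtbl row is { target_mask, target_bits, host_mask,
 * host_bits }: target_to_host_bitmask() and host_to_target_bitmask() use
 * these tables to translate the termios flag words one bit group at a time.
 */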
3417 static const bitmask_transtbl iflag_tbl[] = {
3418 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3419 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3420 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3421 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3422 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3423 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3424 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3425 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3426 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3427 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3428 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3429 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3430 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3431 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3432 { 0, 0, 0, 0 }
3433 };
3434
3435 static const bitmask_transtbl oflag_tbl[] = {
3436 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3437 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3438 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3439 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3440 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3441 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3442 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3443 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3444 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3445 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3446 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3447 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3448 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3449 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3450 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3451 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3452 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3453 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3454 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3455 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3456 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3457 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3458 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3459 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3460 { 0, 0, 0, 0 }
3461 };
3462
3463 static const bitmask_transtbl cflag_tbl[] = {
3464 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3465 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3466 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3467 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3468 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3469 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3470 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3471 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3472 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3473 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3474 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3475 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3476 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3477 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3478 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3479 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3480 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3481 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3482 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3483 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3484 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3485 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3486 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3487 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3488 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3489 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3490 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3491 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3492 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3493 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3494 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3495 { 0, 0, 0, 0 }
3496 };
3497
3498 static const bitmask_transtbl lflag_tbl[] = {
3499 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3500 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3501 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3502 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3503 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3504 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3505 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3506 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3507 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3508 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3509 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3510 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3511 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3512 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3513 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3514 { 0, 0, 0, 0 }
3515 };
3516
3517 static void target_to_host_termios (void *dst, const void *src)
3518 {
3519 struct host_termios *host = dst;
3520 const struct target_termios *target = src;
3521
3522 host->c_iflag =
3523 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3524 host->c_oflag =
3525 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3526 host->c_cflag =
3527 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3528 host->c_lflag =
3529 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3530 host->c_line = target->c_line;
3531
3532 memset(host->c_cc, 0, sizeof(host->c_cc));
3533 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3534 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3535 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3536 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3537 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3538 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3539 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3540 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3541 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3542 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3543 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3544 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3545 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3546 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3547 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3548 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3549 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3550 }
3551
3552 static void host_to_target_termios (void *dst, const void *src)
3553 {
3554 struct target_termios *target = dst;
3555 const struct host_termios *host = src;
3556
3557 target->c_iflag =
3558 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3559 target->c_oflag =
3560 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3561 target->c_cflag =
3562 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3563 target->c_lflag =
3564 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3565 target->c_line = host->c_line;
3566
3567 memset(target->c_cc, 0, sizeof(target->c_cc));
3568 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3569 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3570 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3571 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3572 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3573 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3574 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3575 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3576 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3577 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3578 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3579 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3580 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3581 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3582 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3583 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3584 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3585 }
3586
3587 static const StructEntry struct_termios_def = {
3588 .convert = { host_to_target_termios, target_to_host_termios },
3589 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3590 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3591 };
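/*
 * Registering explicit converters here makes the thunk layer use
 * host_to_target_termios()/target_to_host_termios() for termios structures
 * instead of generic field-by-field conversion, since the flag words need
 * the translation tables above.
 */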
3592
3593 static bitmask_transtbl mmap_flags_tbl[] = {
3594 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3595 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3596 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3597 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3598 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3599 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3600 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3601 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3602 { 0, 0, 0, 0 }
3603 };
3604
3605 #if defined(TARGET_I386)
3606
3607 /* NOTE: there is really only one LDT, shared by all threads */
3608 static uint8_t *ldt_table;
3609
3610 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3611 {
3612 int size;
3613 void *p;
3614
3615 if (!ldt_table)
3616 return 0;
3617 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3618 if (size > bytecount)
3619 size = bytecount;
3620 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3621 if (!p)
3622 return -TARGET_EFAULT;
3623     /* ??? Should this be byteswapped? */
3624 memcpy(p, ldt_table, size);
3625 unlock_user(p, ptr, size);
3626 return size;
3627 }
3628
3629 /* XXX: add locking support */
3630 static abi_long write_ldt(CPUX86State *env,
3631 abi_ulong ptr, unsigned long bytecount, int oldmode)
3632 {
3633 struct target_modify_ldt_ldt_s ldt_info;
3634 struct target_modify_ldt_ldt_s *target_ldt_info;
3635 int seg_32bit, contents, read_exec_only, limit_in_pages;
3636 int seg_not_present, useable, lm;
3637 uint32_t *lp, entry_1, entry_2;
3638
3639 if (bytecount != sizeof(ldt_info))
3640 return -TARGET_EINVAL;
3641 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3642 return -TARGET_EFAULT;
3643 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3644 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3645 ldt_info.limit = tswap32(target_ldt_info->limit);
3646 ldt_info.flags = tswap32(target_ldt_info->flags);
3647 unlock_user_struct(target_ldt_info, ptr, 0);
3648
3649 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3650 return -TARGET_EINVAL;
3651 seg_32bit = ldt_info.flags & 1;
3652 contents = (ldt_info.flags >> 1) & 3;
3653 read_exec_only = (ldt_info.flags >> 3) & 1;
3654 limit_in_pages = (ldt_info.flags >> 4) & 1;
3655 seg_not_present = (ldt_info.flags >> 5) & 1;
3656 useable = (ldt_info.flags >> 6) & 1;
3657 #ifdef TARGET_ABI32
3658 lm = 0;
3659 #else
3660 lm = (ldt_info.flags >> 7) & 1;
3661 #endif
3662 if (contents == 3) {
3663 if (oldmode)
3664 return -TARGET_EINVAL;
3665 if (seg_not_present == 0)
3666 return -TARGET_EINVAL;
3667 }
3668 /* allocate the LDT */
3669 if (!ldt_table) {
3670 env->ldt.base = target_mmap(0,
3671 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3672 PROT_READ|PROT_WRITE,
3673 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3674 if (env->ldt.base == -1)
3675 return -TARGET_ENOMEM;
3676 memset(g2h(env->ldt.base), 0,
3677 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3678 env->ldt.limit = 0xffff;
3679 ldt_table = g2h(env->ldt.base);
3680 }
3681
3682 /* NOTE: same code as Linux kernel */
3683 /* Allow LDTs to be cleared by the user. */
3684 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3685 if (oldmode ||
3686 (contents == 0 &&
3687 read_exec_only == 1 &&
3688 seg_32bit == 0 &&
3689 limit_in_pages == 0 &&
3690 seg_not_present == 1 &&
3691 useable == 0 )) {
3692 entry_1 = 0;
3693 entry_2 = 0;
3694 goto install;
3695 }
3696 }
3697
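    /*
     * Pack the two 32-bit halves of an x86 segment descriptor:
     * entry_1 = base[15:0] << 16 | limit[15:0];
     * entry_2 = base[31:24] and base[23:16], limit[19:16], the type and
     * present bits derived from the flags, DPL=3 with S=1 (the 0x7000),
     * plus the D/B, granularity and long-mode bits.
     */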
3698 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3699 (ldt_info.limit & 0x0ffff);
3700 entry_2 = (ldt_info.base_addr & 0xff000000) |
3701 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3702 (ldt_info.limit & 0xf0000) |
3703 ((read_exec_only ^ 1) << 9) |
3704 (contents << 10) |
3705 ((seg_not_present ^ 1) << 15) |
3706 (seg_32bit << 22) |
3707 (limit_in_pages << 23) |
3708 (lm << 21) |
3709 0x7000;
3710 if (!oldmode)
3711 entry_2 |= (useable << 20);
3712
3713 /* Install the new entry ... */
3714 install:
3715 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3716 lp[0] = tswap32(entry_1);
3717 lp[1] = tswap32(entry_2);
3718 return 0;
3719 }
3720
3721 /* specific and weird i386 syscalls */
3722 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3723 unsigned long bytecount)
3724 {
3725 abi_long ret;
3726
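    /*
     * modify_ldt() sub-functions: 0 reads the LDT, 1 writes an entry in the
     * legacy format, 0x11 writes in the newer format (oldmode == 0).
     */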
3727 switch (func) {
3728 case 0:
3729 ret = read_ldt(ptr, bytecount);
3730 break;
3731 case 1:
3732 ret = write_ldt(env, ptr, bytecount, 1);
3733 break;
3734 case 0x11:
3735 ret = write_ldt(env, ptr, bytecount, 0);
3736 break;
3737 default:
3738 ret = -TARGET_ENOSYS;
3739 break;
3740 }
3741 return ret;
3742 }
3743
3744 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3745 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3746 {
3747 uint64_t *gdt_table = g2h(env->gdt.base);
3748 struct target_modify_ldt_ldt_s ldt_info;
3749 struct target_modify_ldt_ldt_s *target_ldt_info;
3750 int seg_32bit, contents, read_exec_only, limit_in_pages;
3751 int seg_not_present, useable, lm;
3752 uint32_t *lp, entry_1, entry_2;
3753 int i;
3754
3755 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3756 if (!target_ldt_info)
3757 return -TARGET_EFAULT;
3758 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3759 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3760 ldt_info.limit = tswap32(target_ldt_info->limit);
3761 ldt_info.flags = tswap32(target_ldt_info->flags);
3762 if (ldt_info.entry_number == -1) {
3763 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3764 if (gdt_table[i] == 0) {
3765 ldt_info.entry_number = i;
3766 target_ldt_info->entry_number = tswap32(i);
3767 break;
3768 }
3769 }
3770 }
3771 unlock_user_struct(target_ldt_info, ptr, 1);
3772
3773 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3774 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3775 return -TARGET_EINVAL;
3776 seg_32bit = ldt_info.flags & 1;
3777 contents = (ldt_info.flags >> 1) & 3;
3778 read_exec_only = (ldt_info.flags >> 3) & 1;
3779 limit_in_pages = (ldt_info.flags >> 4) & 1;
3780 seg_not_present = (ldt_info.flags >> 5) & 1;
3781 useable = (ldt_info.flags >> 6) & 1;
3782 #ifdef TARGET_ABI32
3783 lm = 0;
3784 #else
3785 lm = (ldt_info.flags >> 7) & 1;
3786 #endif
3787
3788 if (contents == 3) {
3789 if (seg_not_present == 0)
3790 return -TARGET_EINVAL;
3791 }
3792
3793 /* NOTE: same code as Linux kernel */
3794 /* Allow LDTs to be cleared by the user. */
3795 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3796 if ((contents == 0 &&
3797 read_exec_only == 1 &&
3798 seg_32bit == 0 &&
3799 limit_in_pages == 0 &&
3800 seg_not_present == 1 &&
3801 useable == 0 )) {
3802 entry_1 = 0;
3803 entry_2 = 0;
3804 goto install;
3805 }
3806 }
3807
3808 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3809 (ldt_info.limit & 0x0ffff);
3810 entry_2 = (ldt_info.base_addr & 0xff000000) |
3811 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3812 (ldt_info.limit & 0xf0000) |
3813 ((read_exec_only ^ 1) << 9) |
3814 (contents << 10) |
3815 ((seg_not_present ^ 1) << 15) |
3816 (seg_32bit << 22) |
3817 (limit_in_pages << 23) |
3818 (useable << 20) |
3819 (lm << 21) |
3820 0x7000;
3821
3822 /* Install the new entry ... */
3823 install:
3824 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3825 lp[0] = tswap32(entry_1);
3826 lp[1] = tswap32(entry_2);
3827 return 0;
3828 }
3829
3830 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3831 {
3832 struct target_modify_ldt_ldt_s *target_ldt_info;
3833 uint64_t *gdt_table = g2h(env->gdt.base);
3834 uint32_t base_addr, limit, flags;
3835 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3836 int seg_not_present, useable, lm;
3837 uint32_t *lp, entry_1, entry_2;
3838
3839 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3840 if (!target_ldt_info)
3841 return -TARGET_EFAULT;
3842 idx = tswap32(target_ldt_info->entry_number);
3843 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3844 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3845 unlock_user_struct(target_ldt_info, ptr, 1);
3846 return -TARGET_EINVAL;
3847 }
3848 lp = (uint32_t *)(gdt_table + idx);
3849 entry_1 = tswap32(lp[0]);
3850 entry_2 = tswap32(lp[1]);
3851
3852 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3853 contents = (entry_2 >> 10) & 3;
3854 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3855 seg_32bit = (entry_2 >> 22) & 1;
3856 limit_in_pages = (entry_2 >> 23) & 1;
3857 useable = (entry_2 >> 20) & 1;
3858 #ifdef TARGET_ABI32
3859 lm = 0;
3860 #else
3861 lm = (entry_2 >> 21) & 1;
3862 #endif
3863 flags = (seg_32bit << 0) | (contents << 1) |
3864 (read_exec_only << 3) | (limit_in_pages << 4) |
3865 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3866 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3867 base_addr = (entry_1 >> 16) |
3868 (entry_2 & 0xff000000) |
3869 ((entry_2 & 0xff) << 16);
3870 target_ldt_info->base_addr = tswapal(base_addr);
3871 target_ldt_info->limit = tswap32(limit);
3872 target_ldt_info->flags = tswap32(flags);
3873 unlock_user_struct(target_ldt_info, ptr, 1);
3874 return 0;
3875 }
3876 #endif /* TARGET_I386 && TARGET_ABI32 */
3877
3878 #ifndef TARGET_ABI32
3879 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3880 {
3881 abi_long ret = 0;
3882 abi_ulong val;
3883 int idx;
3884
3885 switch(code) {
3886 case TARGET_ARCH_SET_GS:
3887 case TARGET_ARCH_SET_FS:
3888 if (code == TARGET_ARCH_SET_GS)
3889 idx = R_GS;
3890 else
3891 idx = R_FS;
3892 cpu_x86_load_seg(env, idx, 0);
3893 env->segs[idx].base = addr;
3894 break;
3895 case TARGET_ARCH_GET_GS:
3896 case TARGET_ARCH_GET_FS:
3897 if (code == TARGET_ARCH_GET_GS)
3898 idx = R_GS;
3899 else
3900 idx = R_FS;
3901 val = env->segs[idx].base;
3902 if (put_user(val, addr, abi_ulong))
3903 ret = -TARGET_EFAULT;
3904 break;
3905 default:
3906 ret = -TARGET_EINVAL;
3907 break;
3908 }
3909 return ret;
3910 }
3911 #endif
3912
3913 #endif /* defined(TARGET_I386) */
3914
3915 #define NEW_STACK_SIZE 0x40000
3916
3917 #if defined(CONFIG_USE_NPTL)
3918
3919 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3920 typedef struct {
3921 CPUState *env;
3922 pthread_mutex_t mutex;
3923 pthread_cond_t cond;
3924 pthread_t thread;
3925 uint32_t tid;
3926 abi_ulong child_tidptr;
3927 abi_ulong parent_tidptr;
3928 sigset_t sigmask;
3929 } new_thread_info;
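/* Parent/child handshake, roughly: the parent in do_fork() holds clone_lock
   and info.mutex while creating the thread; the child publishes its TID,
   broadcasts info.cond, then blocks briefly on clone_lock until the parent
   has released it (i.e. finished its side of the setup) before entering
   cpu_loop(). */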
3930
3931 static void *clone_func(void *arg)
3932 {
3933 new_thread_info *info = arg;
3934 CPUState *env;
3935 TaskState *ts;
3936
3937 env = info->env;
3938 thread_env = env;
3939 ts = (TaskState *)thread_env->opaque;
3940 info->tid = gettid();
3941 env->host_tid = info->tid;
3942 task_settid(ts);
3943 if (info->child_tidptr)
3944 put_user_u32(info->tid, info->child_tidptr);
3945 if (info->parent_tidptr)
3946 put_user_u32(info->tid, info->parent_tidptr);
3947 /* Enable signals. */
3948 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3949 /* Signal to the parent that we're ready. */
3950 pthread_mutex_lock(&info->mutex);
3951 pthread_cond_broadcast(&info->cond);
3952 pthread_mutex_unlock(&info->mutex);
3953 /* Wait until the parent has finished initializing the TLS state. */
3954 pthread_mutex_lock(&clone_lock);
3955 pthread_mutex_unlock(&clone_lock);
3956 cpu_loop(env);
3957 /* never exits */
3958 return NULL;
3959 }
3960 #else
3961
3962 static int clone_func(void *arg)
3963 {
3964 CPUState *env = arg;
3965 cpu_loop(env);
3966 /* never exits */
3967 return 0;
3968 }
3969 #endif
3970
3971 /* do_fork() must return host values and target errnos (unlike most
3972 do_*() functions). */
3973 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3974 abi_ulong parent_tidptr, target_ulong newtls,
3975 abi_ulong child_tidptr)
3976 {
3977 int ret;
3978 TaskState *ts;
3979 CPUState *new_env;
3980 #if defined(CONFIG_USE_NPTL)
3981 unsigned int nptl_flags;
3982 sigset_t sigmask;
3983 #else
3984 uint8_t *new_stack;
3985 #endif
3986
3987 /* Emulate vfork() with fork() */
3988 if (flags & CLONE_VFORK)
3989 flags &= ~(CLONE_VFORK | CLONE_VM);
3990
3991 if (flags & CLONE_VM) {
3992 TaskState *parent_ts = (TaskState *)env->opaque;
3993 #if defined(CONFIG_USE_NPTL)
3994 new_thread_info info;
3995 pthread_attr_t attr;
3996 #endif
3997 ts = g_malloc0(sizeof(TaskState));
3998 init_task_state(ts);
3999 /* we create a new CPU instance. */
4000 new_env = cpu_copy(env);
4001 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4002 cpu_reset(new_env);
4003 #endif
4004 /* Init regs that differ from the parent. */
4005 cpu_clone_regs(new_env, newsp);
4006 new_env->opaque = ts;
4007 ts->bprm = parent_ts->bprm;
4008 ts->info = parent_ts->info;
4009 #if defined(CONFIG_USE_NPTL)
4010 nptl_flags = flags;
4011 flags &= ~CLONE_NPTL_FLAGS2;
4012
4013 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4014 ts->child_tidptr = child_tidptr;
4015 }
4016
4017 if (nptl_flags & CLONE_SETTLS)
4018 cpu_set_tls (new_env, newtls);
4019
4020 /* Grab a mutex so that thread setup appears atomic. */
4021 pthread_mutex_lock(&clone_lock);
4022
4023 memset(&info, 0, sizeof(info));
4024 pthread_mutex_init(&info.mutex, NULL);
4025 pthread_mutex_lock(&info.mutex);
4026 pthread_cond_init(&info.cond, NULL);
4027 info.env = new_env;
4028 if (nptl_flags & CLONE_CHILD_SETTID)
4029 info.child_tidptr = child_tidptr;
4030 if (nptl_flags & CLONE_PARENT_SETTID)
4031 info.parent_tidptr = parent_tidptr;
4032
4033 ret = pthread_attr_init(&attr);
4034 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4035 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4036 /* It is not safe to deliver signals until the child has finished
4037 initializing, so temporarily block all signals. */
4038 sigfillset(&sigmask);
4039 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4040
4041 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4042 /* TODO: Free new CPU state if thread creation failed. */
4043
4044 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4045 pthread_attr_destroy(&attr);
4046 if (ret == 0) {
4047 /* Wait for the child to initialize. */
4048 pthread_cond_wait(&info.cond, &info.mutex);
4049 ret = info.tid;
4050 if (flags & CLONE_PARENT_SETTID)
4051 put_user_u32(ret, parent_tidptr);
4052 } else {
4053 ret = -1;
4054 }
4055 pthread_mutex_unlock(&info.mutex);
4056 pthread_cond_destroy(&info.cond);
4057 pthread_mutex_destroy(&info.mutex);
4058 pthread_mutex_unlock(&clone_lock);
4059 #else
4060 if (flags & CLONE_NPTL_FLAGS2)
4061 return -EINVAL;
4062 /* This is probably going to die very quickly, but do it anyway. */
4063 new_stack = g_malloc0 (NEW_STACK_SIZE);
4064 #ifdef __ia64__
4065 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4066 #else
4067 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4068 #endif
4069 #endif
4070 } else {
4071 /* if there is no CLONE_VM, we consider it a fork */
4072 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4073 return -EINVAL;
4074 fork_start();
4075 ret = fork();
4076 if (ret == 0) {
4077 /* Child Process. */
4078 cpu_clone_regs(env, newsp);
4079 fork_end(1);
4080 #if defined(CONFIG_USE_NPTL)
4081 /* There is a race condition here. The parent process could
4082 theoretically read the TID in the child process before the child
4083 tid is set. This would require using either ptrace
4084 (not implemented) or having *_tidptr point at a shared memory
4085 mapping. We can't repeat the spinlock hack used above because
4086 the child process gets its own copy of the lock. */
4087 if (flags & CLONE_CHILD_SETTID)
4088 put_user_u32(gettid(), child_tidptr);
4089 if (flags & CLONE_PARENT_SETTID)
4090 put_user_u32(gettid(), parent_tidptr);
4091 ts = (TaskState *)env->opaque;
4092 if (flags & CLONE_SETTLS)
4093 cpu_set_tls (env, newtls);
4094 if (flags & CLONE_CHILD_CLEARTID)
4095 ts->child_tidptr = child_tidptr;
4096 #endif
4097 } else {
4098 fork_end(0);
4099 }
4100 }
4101 return ret;
4102 }
4103
4104 /* warning: doesn't handle Linux-specific flags... */
4105 static int target_to_host_fcntl_cmd(int cmd)
4106 {
4107 switch(cmd) {
4108 case TARGET_F_DUPFD:
4109 case TARGET_F_GETFD:
4110 case TARGET_F_SETFD:
4111 case TARGET_F_GETFL:
4112 case TARGET_F_SETFL:
4113 return cmd;
4114 case TARGET_F_GETLK:
4115 return F_GETLK;
4116 case TARGET_F_SETLK:
4117 return F_SETLK;
4118 case TARGET_F_SETLKW:
4119 return F_SETLKW;
4120 case TARGET_F_GETOWN:
4121 return F_GETOWN;
4122 case TARGET_F_SETOWN:
4123 return F_SETOWN;
4124 case TARGET_F_GETSIG:
4125 return F_GETSIG;
4126 case TARGET_F_SETSIG:
4127 return F_SETSIG;
4128 #if TARGET_ABI_BITS == 32
4129 case TARGET_F_GETLK64:
4130 return F_GETLK64;
4131 case TARGET_F_SETLK64:
4132 return F_SETLK64;
4133 case TARGET_F_SETLKW64:
4134 return F_SETLKW64;
4135 #endif
4136 case TARGET_F_SETLEASE:
4137 return F_SETLEASE;
4138 case TARGET_F_GETLEASE:
4139 return F_GETLEASE;
4140 #ifdef F_DUPFD_CLOEXEC
4141 case TARGET_F_DUPFD_CLOEXEC:
4142 return F_DUPFD_CLOEXEC;
4143 #endif
4144 case TARGET_F_NOTIFY:
4145 return F_NOTIFY;
4146 default:
4147 return -TARGET_EINVAL;
4148 }
4149 return -TARGET_EINVAL;
4150 }
4151
4152 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4153 {
4154 struct flock fl;
4155 struct target_flock *target_fl;
4156 struct flock64 fl64;
4157 struct target_flock64 *target_fl64;
4158 abi_long ret;
4159 int host_cmd = target_to_host_fcntl_cmd(cmd);
4160
4161 if (host_cmd == -TARGET_EINVAL)
4162 return host_cmd;
4163
4164 switch(cmd) {
4165 case TARGET_F_GETLK:
4166 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4167 return -TARGET_EFAULT;
4168 fl.l_type = tswap16(target_fl->l_type);
4169 fl.l_whence = tswap16(target_fl->l_whence);
4170 fl.l_start = tswapal(target_fl->l_start);
4171 fl.l_len = tswapal(target_fl->l_len);
4172 fl.l_pid = tswap32(target_fl->l_pid);
4173 unlock_user_struct(target_fl, arg, 0);
4174 ret = get_errno(fcntl(fd, host_cmd, &fl));
4175 if (ret == 0) {
4176 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4177 return -TARGET_EFAULT;
4178 target_fl->l_type = tswap16(fl.l_type);
4179 target_fl->l_whence = tswap16(fl.l_whence);
4180 target_fl->l_start = tswapal(fl.l_start);
4181 target_fl->l_len = tswapal(fl.l_len);
4182 target_fl->l_pid = tswap32(fl.l_pid);
4183 unlock_user_struct(target_fl, arg, 1);
4184 }
4185 break;
4186
4187 case TARGET_F_SETLK:
4188 case TARGET_F_SETLKW:
4189 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4190 return -TARGET_EFAULT;
4191 fl.l_type = tswap16(target_fl->l_type);
4192 fl.l_whence = tswap16(target_fl->l_whence);
4193 fl.l_start = tswapal(target_fl->l_start);
4194 fl.l_len = tswapal(target_fl->l_len);
4195 fl.l_pid = tswap32(target_fl->l_pid);
4196 unlock_user_struct(target_fl, arg, 0);
4197 ret = get_errno(fcntl(fd, host_cmd, &fl));
4198 break;
4199
4200 case TARGET_F_GETLK64:
4201 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4202 return -TARGET_EFAULT;
4203 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4204 fl64.l_whence = tswap16(target_fl64->l_whence);
4205 fl64.l_start = tswap64(target_fl64->l_start);
4206 fl64.l_len = tswap64(target_fl64->l_len);
4207 fl64.l_pid = tswap32(target_fl64->l_pid);
4208 unlock_user_struct(target_fl64, arg, 0);
4209 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4210 if (ret == 0) {
4211 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4212 return -TARGET_EFAULT;
4213 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4214 target_fl64->l_whence = tswap16(fl64.l_whence);
4215 target_fl64->l_start = tswap64(fl64.l_start);
4216 target_fl64->l_len = tswap64(fl64.l_len);
4217 target_fl64->l_pid = tswap32(fl64.l_pid);
4218 unlock_user_struct(target_fl64, arg, 1);
4219 }
4220 break;
4221 case TARGET_F_SETLK64:
4222 case TARGET_F_SETLKW64:
4223 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4224 return -TARGET_EFAULT;
4225 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4226 fl64.l_whence = tswap16(target_fl64->l_whence);
4227 fl64.l_start = tswap64(target_fl64->l_start);
4228 fl64.l_len = tswap64(target_fl64->l_len);
4229 fl64.l_pid = tswap32(target_fl64->l_pid);
4230 unlock_user_struct(target_fl64, arg, 0);
4231 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4232 break;
4233
4234 case TARGET_F_GETFL:
4235 ret = get_errno(fcntl(fd, host_cmd, arg));
4236 if (ret >= 0) {
4237 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4238 }
4239 break;
4240
4241 case TARGET_F_SETFL:
4242 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4243 break;
4244
4245 case TARGET_F_SETOWN:
4246 case TARGET_F_GETOWN:
4247 case TARGET_F_SETSIG:
4248 case TARGET_F_GETSIG:
4249 case TARGET_F_SETLEASE:
4250 case TARGET_F_GETLEASE:
4251 ret = get_errno(fcntl(fd, host_cmd, arg));
4252 break;
4253
4254 default:
4255 ret = get_errno(fcntl(fd, cmd, arg));
4256 break;
4257 }
4258 return ret;
4259 }
4260
4261 #ifdef USE_UID16
4262
4263 static inline int high2lowuid(int uid)
4264 {
4265 if (uid > 65535)
4266 return 65534;
4267 else
4268 return uid;
4269 }
4270
4271 static inline int high2lowgid(int gid)
4272 {
4273 if (gid > 65535)
4274 return 65534;
4275 else
4276 return gid;
4277 }
4278
4279 static inline int low2highuid(int uid)
4280 {
4281 if ((int16_t)uid == -1)
4282 return -1;
4283 else
4284 return uid;
4285 }
4286
4287 static inline int low2highgid(int gid)
4288 {
4289 if ((int16_t)gid == -1)
4290 return -1;
4291 else
4292 return gid;
4293 }
4294 static inline int tswapid(int id)
4295 {
4296 return tswap16(id);
4297 }
4298 #else /* !USE_UID16 */
4299 static inline int high2lowuid(int uid)
4300 {
4301 return uid;
4302 }
4303 static inline int high2lowgid(int gid)
4304 {
4305 return gid;
4306 }
4307 static inline int low2highuid(int uid)
4308 {
4309 return uid;
4310 }
4311 static inline int low2highgid(int gid)
4312 {
4313 return gid;
4314 }
4315 static inline int tswapid(int id)
4316 {
4317 return tswap32(id);
4318 }
4319 #endif /* USE_UID16 */
4320
4321 void syscall_init(void)
4322 {
4323 IOCTLEntry *ie;
4324 const argtype *arg_type;
4325 int size;
4326 int i;
4327
4328 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4329 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4330 #include "syscall_types.h"
4331 #undef STRUCT
4332 #undef STRUCT_SPECIAL
4333
4334 /* we patch the ioctl size if necessary. We rely on the fact that
4335 no ioctl has all the bits at '1' in the size field */
4336 ie = ioctl_entries;
4337 while (ie->target_cmd != 0) {
4338 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4339 TARGET_IOC_SIZEMASK) {
4340 arg_type = ie->arg_type;
4341 if (arg_type[0] != TYPE_PTR) {
4342 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4343 ie->target_cmd);
4344 exit(1);
4345 }
4346 arg_type++;
4347 size = thunk_type_size(arg_type, 0);
4348 ie->target_cmd = (ie->target_cmd &
4349 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4350 (size << TARGET_IOC_SIZESHIFT);
4351 }
4352
4353 /* Build target_to_host_errno_table[] from
4354 * host_to_target_errno_table[]. */
4355 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4356 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4357
4358 /* automatic consistency check if same arch */
4359 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4360 (defined(__x86_64__) && defined(TARGET_X86_64))
4361 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4362 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4363 ie->name, ie->target_cmd, ie->host_cmd);
4364 }
4365 #endif
4366 ie++;
4367 }
4368 }
4369
4370 #if TARGET_ABI_BITS == 32
4371 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4372 {
4373 #ifdef TARGET_WORDS_BIGENDIAN
4374 return ((uint64_t)word0 << 32) | word1;
4375 #else
4376 return ((uint64_t)word1 << 32) | word0;
4377 #endif
4378 }
4379 #else /* TARGET_ABI_BITS == 32 */
4380 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4381 {
4382 return word0;
4383 }
4384 #endif /* TARGET_ABI_BITS != 32 */
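/* Example (assuming a 32-bit little-endian target ABI): truncate64 passes the
   64-bit offset as two register words, low word first, so
   target_offset64(lo, hi) reassembles ((uint64_t)hi << 32) | lo. */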
4385
4386 #ifdef TARGET_NR_truncate64
4387 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4388 abi_long arg2,
4389 abi_long arg3,
4390 abi_long arg4)
4391 {
4392 if (regpairs_aligned(cpu_env)) {
4393 arg2 = arg3;
4394 arg3 = arg4;
4395 }
4396 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4397 }
4398 #endif
4399
4400 #ifdef TARGET_NR_ftruncate64
4401 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4402 abi_long arg2,
4403 abi_long arg3,
4404 abi_long arg4)
4405 {
4406 if (regpairs_aligned(cpu_env)) {
4407 arg2 = arg3;
4408 arg3 = arg4;
4409 }
4410 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4411 }
4412 #endif
4413
4414 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4415 abi_ulong target_addr)
4416 {
4417 struct target_timespec *target_ts;
4418
4419 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4420 return -TARGET_EFAULT;
4421 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4422 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4423 unlock_user_struct(target_ts, target_addr, 0);
4424 return 0;
4425 }
4426
4427 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4428 struct timespec *host_ts)
4429 {
4430 struct target_timespec *target_ts;
4431
4432 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4433 return -TARGET_EFAULT;
4434 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4435 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4436 unlock_user_struct(target_ts, target_addr, 1);
4437 return 0;
4438 }
4439
4440 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4441 static inline abi_long host_to_target_stat64(void *cpu_env,
4442 abi_ulong target_addr,
4443 struct stat *host_st)
4444 {
4445 #ifdef TARGET_ARM
4446 if (((CPUARMState *)cpu_env)->eabi) {
4447 struct target_eabi_stat64 *target_st;
4448
4449 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4450 return -TARGET_EFAULT;
4451 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4452 __put_user(host_st->st_dev, &target_st->st_dev);
4453 __put_user(host_st->st_ino, &target_st->st_ino);
4454 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4455 __put_user(host_st->st_ino, &target_st->__st_ino);
4456 #endif
4457 __put_user(host_st->st_mode, &target_st->st_mode);
4458 __put_user(host_st->st_nlink, &target_st->st_nlink);
4459 __put_user(host_st->st_uid, &target_st->st_uid);
4460 __put_user(host_st->st_gid, &target_st->st_gid);
4461 __put_user(host_st->st_rdev, &target_st->st_rdev);
4462 __put_user(host_st->st_size, &target_st->st_size);
4463 __put_user(host_st->st_blksize, &target_st->st_blksize);
4464 __put_user(host_st->st_blocks, &target_st->st_blocks);
4465 __put_user(host_st->st_atime, &target_st->target_st_atime);
4466 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4467 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4468 unlock_user_struct(target_st, target_addr, 1);
4469 } else
4470 #endif
4471 {
4472 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4473 struct target_stat *target_st;
4474 #else
4475 struct target_stat64 *target_st;
4476 #endif
4477
4478 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4479 return -TARGET_EFAULT;
4480 memset(target_st, 0, sizeof(*target_st));
4481 __put_user(host_st->st_dev, &target_st->st_dev);
4482 __put_user(host_st->st_ino, &target_st->st_ino);
4483 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4484 __put_user(host_st->st_ino, &target_st->__st_ino);
4485 #endif
4486 __put_user(host_st->st_mode, &target_st->st_mode);
4487 __put_user(host_st->st_nlink, &target_st->st_nlink);
4488 __put_user(host_st->st_uid, &target_st->st_uid);
4489 __put_user(host_st->st_gid, &target_st->st_gid);
4490 __put_user(host_st->st_rdev, &target_st->st_rdev);
4491 /* XXX: could make better use of the kernel struct */
4492 __put_user(host_st->st_size, &target_st->st_size);
4493 __put_user(host_st->st_blksize, &target_st->st_blksize);
4494 __put_user(host_st->st_blocks, &target_st->st_blocks);
4495 __put_user(host_st->st_atime, &target_st->target_st_atime);
4496 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4497 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4498 unlock_user_struct(target_st, target_addr, 1);
4499 }
4500
4501 return 0;
4502 }
4503 #endif
4504
4505 #if defined(CONFIG_USE_NPTL)
4506 /* ??? Using host futex calls even when target atomic operations
4507 are not really atomic probably breaks things. However, implementing
4508 futexes locally would make futexes shared between multiple processes
4509 tricky. In any case they're probably useless, because guest atomic
4510 operations won't work either. */
4511 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4512 target_ulong uaddr2, int val3)
4513 {
4514 struct timespec ts, *pts;
4515 int base_op;
4516
4517 /* ??? We assume FUTEX_* constants are the same on both host
4518 and target. */
4519 #ifdef FUTEX_CMD_MASK
4520 base_op = op & FUTEX_CMD_MASK;
4521 #else
4522 base_op = op;
4523 #endif
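/* Note: for FUTEX_WAIT the val argument is compared against the guest word
   at uaddr, so it is byte-swapped to guest order; for FUTEX_WAKE it is only
   a wakeup count and is passed through unchanged. */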
4524 switch (base_op) {
4525 case FUTEX_WAIT:
4526 if (timeout) {
4527 pts = &ts;
4528 target_to_host_timespec(pts, timeout);
4529 } else {
4530 pts = NULL;
4531 }
4532 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4533 pts, NULL, 0));
4534 case FUTEX_WAKE:
4535 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4536 case FUTEX_FD:
4537 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4538 case FUTEX_REQUEUE:
4539 case FUTEX_CMP_REQUEUE:
4540 case FUTEX_WAKE_OP:
4541 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4542 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4543 But the prototype takes a `struct timespec *'; insert casts
4544 to satisfy the compiler. We do not need to tswap TIMEOUT
4545 since it's not compared to guest memory. */
4546 pts = (struct timespec *)(uintptr_t) timeout;
4547 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4548 g2h(uaddr2),
4549 (base_op == FUTEX_CMP_REQUEUE
4550 ? tswap32(val3)
4551 : val3)));
4552 default:
4553 return -TARGET_ENOSYS;
4554 }
4555 }
4556 #endif
4557
4558 /* Map host to target signal numbers for the wait family of syscalls.
4559 Assume all other status bits are the same. */
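/* Linux status layout assumed here: for a killed child, bits 0-6 hold the
   terminating signal (bit 7 is the core-dump flag); for a stopped child the
   low byte is 0x7f and bits 8-15 hold the stop signal. */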
4560 static int host_to_target_waitstatus(int status)
4561 {
4562 if (WIFSIGNALED(status)) {
4563 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4564 }
4565 if (WIFSTOPPED(status)) {
4566 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4567 | (status & 0xff);
4568 }
4569 return status;
4570 }
4571
4572 int get_osversion(void)
4573 {
4574 static int osversion;
4575 struct new_utsname buf;
4576 const char *s;
4577 int i, n, tmp;
4578 if (osversion)
4579 return osversion;
4580 if (qemu_uname_release && *qemu_uname_release) {
4581 s = qemu_uname_release;
4582 } else {
4583 if (sys_uname(&buf))
4584 return 0;
4585 s = buf.release;
4586 }
4587 tmp = 0;
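/* Pack "major.minor.patch" into one int, one byte per component;
   e.g. a release string of "2.6.32" yields 0x020620. */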
4588 for (i = 0; i < 3; i++) {
4589 n = 0;
4590 while (*s >= '0' && *s <= '9') {
4591 n *= 10;
4592 n += *s - '0';
4593 s++;
4594 }
4595 tmp = (tmp << 8) + n;
4596 if (*s == '.')
4597 s++;
4598 }
4599 osversion = tmp;
4600 return osversion;
4601 }
4602
4603
4604 static int open_self_maps(void *cpu_env, int fd)
4605 {
4606 TaskState *ts = ((CPUState *)cpu_env)->opaque;
4607
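/* Emit a single [stack] entry in the kernel's maps format
   (start-end perms offset dev inode pathname); only the guest stack
   mapping is faked for now. */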
4608 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4609 (unsigned long long)ts->info->stack_limit,
4610 (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1))
4611 & TARGET_PAGE_MASK,
4612 (unsigned long long)ts->stack_base);
4613
4614 return 0;
4615 }
4616
4617 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
4618 {
4619 struct fake_open {
4620 const char *filename;
4621 int (*fill)(void *cpu_env, int fd);
4622 };
4623 const struct fake_open *fake_open;
4624 static const struct fake_open fakes[] = {
4625 { "/proc/self/maps", open_self_maps },
4626 { NULL, NULL }
4627 };
4628
4629 for (fake_open = fakes; fake_open->filename; fake_open++) {
4630 if (!strncmp(pathname, fake_open->filename,
4631 strlen(fake_open->filename))) {
4632 break;
4633 }
4634 }
4635
4636 if (fake_open->filename) {
4637 const char *tmpdir;
4638 char filename[PATH_MAX];
4639 int fd, r;
4640
4641 /* create a temporary file to back the faked file contents */
4642 tmpdir = getenv("TMPDIR");
4643 if (!tmpdir)
4644 tmpdir = "/tmp";
4645 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
4646 fd = mkstemp(filename);
4647 if (fd < 0) {
4648 return fd;
4649 }
4650 unlink(filename);
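/* Unlinking keeps the file reachable only through the fd; it goes away
   automatically once the guest closes it. */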
4651
4652 if ((r = fake_open->fill(cpu_env, fd))) {
4653 close(fd);
4654 return r;
4655 }
4656 lseek(fd, 0, SEEK_SET);
4657
4658 return fd;
4659 }
4660
4661 return get_errno(open(path(pathname), flags, mode));
4662 }
4663
4664 /* do_syscall() should always have a single exit point at the end so
4665 that actions, such as logging of syscall results, can be performed.
4666 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4667 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4668 abi_long arg2, abi_long arg3, abi_long arg4,
4669 abi_long arg5, abi_long arg6, abi_long arg7,
4670 abi_long arg8)
4671 {
4672 abi_long ret;
4673 struct stat st;
4674 struct statfs stfs;
4675 void *p;
4676
4677 #ifdef DEBUG
4678 gemu_log("syscall %d", num);
4679 #endif
4680 if(do_strace)
4681 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4682
4683 switch(num) {
4684 case TARGET_NR_exit:
4685 #ifdef CONFIG_USE_NPTL
4686 /* In old applications this may be used to implement _exit(2).
4687 However, in threaded applications it is used for thread termination,
4688 and _exit_group is used for application termination.
4689 Do thread termination if we have more than one thread. */
4690 /* FIXME: This probably breaks if a signal arrives. We should probably
4691 be disabling signals. */
4692 if (first_cpu->next_cpu) {
4693 TaskState *ts;
4694 CPUState **lastp;
4695 CPUState *p;
4696
4697 cpu_list_lock();
4698 lastp = &first_cpu;
4699 p = first_cpu;
4700 while (p && p != (CPUState *)cpu_env) {
4701 lastp = &p->next_cpu;
4702 p = p->next_cpu;
4703 }
4704 /* If we didn't find the CPU for this thread then something is
4705 horribly wrong. */
4706 if (!p)
4707 abort();
4708 /* Remove the CPU from the list. */
4709 *lastp = p->next_cpu;
4710 cpu_list_unlock();
4711 ts = ((CPUState *)cpu_env)->opaque;
4712 if (ts->child_tidptr) {
4713 put_user_u32(0, ts->child_tidptr);
4714 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4715 NULL, NULL, 0);
4716 }
4717 thread_env = NULL;
4718 g_free(cpu_env);
4719 g_free(ts);
4720 pthread_exit(NULL);
4721 }
4722 #endif
4723 #ifdef TARGET_GPROF
4724 _mcleanup();
4725 #endif
4726 gdb_exit(cpu_env, arg1);
4727 _exit(arg1);
4728 ret = 0; /* avoid warning */
4729 break;
4730 case TARGET_NR_read:
4731 if (arg3 == 0)
4732 ret = 0;
4733 else {
4734 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4735 goto efault;
4736 ret = get_errno(read(arg1, p, arg3));
4737 unlock_user(p, arg2, ret);
4738 }
4739 break;
4740 case TARGET_NR_write:
4741 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4742 goto efault;
4743 ret = get_errno(write(arg1, p, arg3));
4744 unlock_user(p, arg2, 0);
4745 break;
4746 case TARGET_NR_open:
4747 if (!(p = lock_user_string(arg1)))
4748 goto efault;
4749 ret = get_errno(do_open(cpu_env, p,
4750 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4751 arg3));
4752 unlock_user(p, arg1, 0);
4753 break;
4754 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4755 case TARGET_NR_openat:
4756 if (!(p = lock_user_string(arg2)))
4757 goto efault;
4758 ret = get_errno(sys_openat(arg1,
4759 path(p),
4760 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4761 arg4));
4762 unlock_user(p, arg2, 0);
4763 break;
4764 #endif
4765 case TARGET_NR_close:
4766 ret = get_errno(close(arg1));
4767 break;
4768 case TARGET_NR_brk:
4769 ret = do_brk(arg1);
4770 break;
4771 case TARGET_NR_fork:
4772 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4773 break;
4774 #ifdef TARGET_NR_waitpid
4775 case TARGET_NR_waitpid:
4776 {
4777 int status;
4778 ret = get_errno(waitpid(arg1, &status, arg3));
4779 if (!is_error(ret) && arg2
4780 && put_user_s32(host_to_target_waitstatus(status), arg2))
4781 goto efault;
4782 }
4783 break;
4784 #endif
4785 #ifdef TARGET_NR_waitid
4786 case TARGET_NR_waitid:
4787 {
4788 siginfo_t info;
4789 info.si_pid = 0;
4790 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4791 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4792 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4793 goto efault;
4794 host_to_target_siginfo(p, &info);
4795 unlock_user(p, arg3, sizeof(target_siginfo_t));
4796 }
4797 }
4798 break;
4799 #endif
4800 #ifdef TARGET_NR_creat /* not on alpha */
4801 case TARGET_NR_creat:
4802 if (!(p = lock_user_string(arg1)))
4803 goto efault;
4804 ret = get_errno(creat(p, arg2));
4805 unlock_user(p, arg1, 0);
4806 break;
4807 #endif
4808 case TARGET_NR_link:
4809 {
4810 void * p2;
4811 p = lock_user_string(arg1);
4812 p2 = lock_user_string(arg2);
4813 if (!p || !p2)
4814 ret = -TARGET_EFAULT;
4815 else
4816 ret = get_errno(link(p, p2));
4817 unlock_user(p2, arg2, 0);
4818 unlock_user(p, arg1, 0);
4819 }
4820 break;
4821 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4822 case TARGET_NR_linkat:
4823 {
4824 void * p2 = NULL;
4825 if (!arg2 || !arg4)
4826 goto efault;
4827 p = lock_user_string(arg2);
4828 p2 = lock_user_string(arg4);
4829 if (!p || !p2)
4830 ret = -TARGET_EFAULT;
4831 else
4832 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4833 unlock_user(p, arg2, 0);
4834 unlock_user(p2, arg4, 0);
4835 }
4836 break;
4837 #endif
4838 case TARGET_NR_unlink:
4839 if (!(p = lock_user_string(arg1)))
4840 goto efault;
4841 ret = get_errno(unlink(p));
4842 unlock_user(p, arg1, 0);
4843 break;
4844 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4845 case TARGET_NR_unlinkat:
4846 if (!(p = lock_user_string(arg2)))
4847 goto efault;
4848 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4849 unlock_user(p, arg2, 0);
4850 break;
4851 #endif
4852 case TARGET_NR_execve:
4853 {
4854 char **argp, **envp;
4855 int argc, envc;
4856 abi_ulong gp;
4857 abi_ulong guest_argp;
4858 abi_ulong guest_envp;
4859 abi_ulong addr;
4860 char **q;
4861
4862 argc = 0;
4863 guest_argp = arg2;
4864 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4865 if (get_user_ual(addr, gp))
4866 goto efault;
4867 if (!addr)
4868 break;
4869 argc++;
4870 }
4871 envc = 0;
4872 guest_envp = arg3;
4873 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4874 if (get_user_ual(addr, gp))
4875 goto efault;
4876 if (!addr)
4877 break;
4878 envc++;
4879 }
4880
4881 argp = alloca((argc + 1) * sizeof(void *));
4882 envp = alloca((envc + 1) * sizeof(void *));
4883
4884 for (gp = guest_argp, q = argp; gp;
4885 gp += sizeof(abi_ulong), q++) {
4886 if (get_user_ual(addr, gp))
4887 goto execve_efault;
4888 if (!addr)
4889 break;
4890 if (!(*q = lock_user_string(addr)))
4891 goto execve_efault;
4892 }
4893 *q = NULL;
4894
4895 for (gp = guest_envp, q = envp; gp;
4896 gp += sizeof(abi_ulong), q++) {
4897 if (get_user_ual(addr, gp))
4898 goto execve_efault;
4899 if (!addr)
4900 break;
4901 if (!(*q = lock_user_string(addr)))
4902 goto execve_efault;
4903 }
4904 *q = NULL;
4905
4906 if (!(p = lock_user_string(arg1)))
4907 goto execve_efault;
4908 ret = get_errno(execve(p, argp, envp));
4909 unlock_user(p, arg1, 0);
4910
4911 goto execve_end;
4912
4913 execve_efault:
4914 ret = -TARGET_EFAULT;
4915
4916 execve_end:
4917 for (gp = guest_argp, q = argp; *q;
4918 gp += sizeof(abi_ulong), q++) {
4919 if (get_user_ual(addr, gp)
4920 || !addr)
4921 break;
4922 unlock_user(*q, addr, 0);
4923 }
4924 for (gp = guest_envp, q = envp; *q;
4925 gp += sizeof(abi_ulong), q++) {
4926 if (get_user_ual(addr, gp)
4927 || !addr)
4928 break;
4929 unlock_user(*q, addr, 0);
4930 }
4931 }
4932 break;
4933 case TARGET_NR_chdir:
4934 if (!(p = lock_user_string(arg1)))
4935 goto efault;
4936 ret = get_errno(chdir(p));
4937 unlock_user(p, arg1, 0);
4938 break;
4939 #ifdef TARGET_NR_time
4940 case TARGET_NR_time:
4941 {
4942 time_t host_time;
4943 ret = get_errno(time(&host_time));
4944 if (!is_error(ret)
4945 && arg1
4946 && put_user_sal(host_time, arg1))
4947 goto efault;
4948 }
4949 break;
4950 #endif
4951 case TARGET_NR_mknod:
4952 if (!(p = lock_user_string(arg1)))
4953 goto efault;
4954 ret = get_errno(mknod(p, arg2, arg3));
4955 unlock_user(p, arg1, 0);
4956 break;
4957 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4958 case TARGET_NR_mknodat:
4959 if (!(p = lock_user_string(arg2)))
4960 goto efault;
4961 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4962 unlock_user(p, arg2, 0);
4963 break;
4964 #endif
4965 case TARGET_NR_chmod:
4966 if (!(p = lock_user_string(arg1)))
4967 goto efault;
4968 ret = get_errno(chmod(p, arg2));
4969 unlock_user(p, arg1, 0);
4970 break;
4971 #ifdef TARGET_NR_break
4972 case TARGET_NR_break:
4973 goto unimplemented;
4974 #endif
4975 #ifdef TARGET_NR_oldstat
4976 case TARGET_NR_oldstat:
4977 goto unimplemented;
4978 #endif
4979 case TARGET_NR_lseek:
4980 ret = get_errno(lseek(arg1, arg2, arg3));
4981 break;
4982 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4983 /* Alpha specific */
4984 case TARGET_NR_getxpid:
4985 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
4986 ret = get_errno(getpid());
4987 break;
4988 #endif
4989 #ifdef TARGET_NR_getpid
4990 case TARGET_NR_getpid:
4991 ret = get_errno(getpid());
4992 break;
4993 #endif
4994 case TARGET_NR_mount:
4995 {
4996 /* need to look at the data field */
4997 void *p2, *p3;
4998 p = lock_user_string(arg1);
4999 p2 = lock_user_string(arg2);
5000 p3 = lock_user_string(arg3);
5001 if (!p || !p2 || !p3)
5002 ret = -TARGET_EFAULT;
5003 else {
5004 /* FIXME - arg5 should be locked, but it isn't clear how to
5005 * do that since it's not guaranteed to be a NULL-terminated
5006 * string.
5007 */
5008 if ( ! arg5 )
5009 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5010 else
5011 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5012 }
5013 unlock_user(p, arg1, 0);
5014 unlock_user(p2, arg2, 0);
5015 unlock_user(p3, arg3, 0);
5016 break;
5017 }
5018 #ifdef TARGET_NR_umount
5019 case TARGET_NR_umount:
5020 if (!(p = lock_user_string(arg1)))
5021 goto efault;
5022 ret = get_errno(umount(p));
5023 unlock_user(p, arg1, 0);
5024 break;
5025 #endif
5026 #ifdef TARGET_NR_stime /* not on alpha */
5027 case TARGET_NR_stime:
5028 {
5029 time_t host_time;
5030 if (get_user_sal(host_time, arg1))
5031 goto efault;
5032 ret = get_errno(stime(&host_time));
5033 }
5034 break;
5035 #endif
5036 case TARGET_NR_ptrace:
5037 goto unimplemented;
5038 #ifdef TARGET_NR_alarm /* not on alpha */
5039 case TARGET_NR_alarm:
5040 ret = alarm(arg1);
5041 break;
5042 #endif
5043 #ifdef TARGET_NR_oldfstat
5044 case TARGET_NR_oldfstat:
5045 goto unimplemented;
5046 #endif
5047 #ifdef TARGET_NR_pause /* not on alpha */
5048 case TARGET_NR_pause:
5049 ret = get_errno(pause());
5050 break;
5051 #endif
5052 #ifdef TARGET_NR_utime
5053 case TARGET_NR_utime:
5054 {
5055 struct utimbuf tbuf, *host_tbuf;
5056 struct target_utimbuf *target_tbuf;
5057 if (arg2) {
5058 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5059 goto efault;
5060 tbuf.actime = tswapal(target_tbuf->actime);
5061 tbuf.modtime = tswapal(target_tbuf->modtime);
5062 unlock_user_struct(target_tbuf, arg2, 0);
5063 host_tbuf = &tbuf;
5064 } else {
5065 host_tbuf = NULL;
5066 }
5067 if (!(p = lock_user_string(arg1)))
5068 goto efault;
5069 ret = get_errno(utime(p, host_tbuf));
5070 unlock_user(p, arg1, 0);
5071 }
5072 break;
5073 #endif
5074 case TARGET_NR_utimes:
5075 {
5076 struct timeval *tvp, tv[2];
5077 if (arg2) {
5078 if (copy_from_user_timeval(&tv[0], arg2)
5079 || copy_from_user_timeval(&tv[1],
5080 arg2 + sizeof(struct target_timeval)))
5081 goto efault;
5082 tvp = tv;
5083 } else {
5084 tvp = NULL;
5085 }
5086 if (!(p = lock_user_string(arg1)))
5087 goto efault;
5088 ret = get_errno(utimes(p, tvp));
5089 unlock_user(p, arg1, 0);
5090 }
5091 break;
5092 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5093 case TARGET_NR_futimesat:
5094 {
5095 struct timeval *tvp, tv[2];
5096 if (arg3) {
5097 if (copy_from_user_timeval(&tv[0], arg3)
5098 || copy_from_user_timeval(&tv[1],
5099 arg3 + sizeof(struct target_timeval)))
5100 goto efault;
5101 tvp = tv;
5102 } else {
5103 tvp = NULL;
5104 }
5105 if (!(p = lock_user_string(arg2)))
5106 goto efault;
5107 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5108 unlock_user(p, arg2, 0);
5109 }
5110 break;
5111 #endif
5112 #ifdef TARGET_NR_stty
5113 case TARGET_NR_stty:
5114 goto unimplemented;
5115 #endif
5116 #ifdef TARGET_NR_gtty
5117 case TARGET_NR_gtty:
5118 goto unimplemented;
5119 #endif
5120 case TARGET_NR_access:
5121 if (!(p = lock_user_string(arg1)))
5122 goto efault;
5123 ret = get_errno(access(path(p), arg2));
5124 unlock_user(p, arg1, 0);
5125 break;
5126 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5127 case TARGET_NR_faccessat:
5128 if (!(p = lock_user_string(arg2)))
5129 goto efault;
5130 ret = get_errno(sys_faccessat(arg1, p, arg3));
5131 unlock_user(p, arg2, 0);
5132 break;
5133 #endif
5134 #ifdef TARGET_NR_nice /* not on alpha */
5135 case TARGET_NR_nice:
5136 ret = get_errno(nice(arg1));
5137 break;
5138 #endif
5139 #ifdef TARGET_NR_ftime
5140 case TARGET_NR_ftime:
5141 goto unimplemented;
5142 #endif
5143 case TARGET_NR_sync:
5144 sync();
5145 ret = 0;
5146 break;
5147 case TARGET_NR_kill:
5148 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5149 break;
5150 case TARGET_NR_rename:
5151 {
5152 void *p2;
5153 p = lock_user_string(arg1);
5154 p2 = lock_user_string(arg2);
5155 if (!p || !p2)
5156 ret = -TARGET_EFAULT;
5157 else
5158 ret = get_errno(rename(p, p2));
5159 unlock_user(p2, arg2, 0);
5160 unlock_user(p, arg1, 0);
5161 }
5162 break;
5163 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5164 case TARGET_NR_renameat:
5165 {
5166 void *p2;
5167 p = lock_user_string(arg2);
5168 p2 = lock_user_string(arg4);
5169 if (!p || !p2)
5170 ret = -TARGET_EFAULT;
5171 else
5172 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5173 unlock_user(p2, arg4, 0);
5174 unlock_user(p, arg2, 0);
5175 }
5176 break;
5177 #endif
5178 case TARGET_NR_mkdir:
5179 if (!(p = lock_user_string(arg1)))
5180 goto efault;
5181 ret = get_errno(mkdir(p, arg2));
5182 unlock_user(p, arg1, 0);
5183 break;
5184 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5185 case TARGET_NR_mkdirat:
5186 if (!(p = lock_user_string(arg2)))
5187 goto efault;
5188 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5189 unlock_user(p, arg2, 0);
5190 break;
5191 #endif
5192 case TARGET_NR_rmdir:
5193 if (!(p = lock_user_string(arg1)))
5194 goto efault;
5195 ret = get_errno(rmdir(p));
5196 unlock_user(p, arg1, 0);
5197 break;
5198 case TARGET_NR_dup:
5199 ret = get_errno(dup(arg1));
5200 break;
5201 case TARGET_NR_pipe:
5202 ret = do_pipe(cpu_env, arg1, 0, 0);
5203 break;
5204 #ifdef TARGET_NR_pipe2
5205 case TARGET_NR_pipe2:
5206 ret = do_pipe(cpu_env, arg1, arg2, 1);
5207 break;
5208 #endif
5209 case TARGET_NR_times:
5210 {
5211 struct target_tms *tmsp;
5212 struct tms tms;
5213 ret = get_errno(times(&tms));
5214 if (arg1) {
5215 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5216 if (!tmsp)
5217 goto efault;
5218 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5219 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5220 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5221 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5222 }
5223 if (!is_error(ret))
5224 ret = host_to_target_clock_t(ret);
5225 }
5226 break;
5227 #ifdef TARGET_NR_prof
5228 case TARGET_NR_prof:
5229 goto unimplemented;
5230 #endif
5231 #ifdef TARGET_NR_signal
5232 case TARGET_NR_signal:
5233 goto unimplemented;
5234 #endif
5235 case TARGET_NR_acct:
5236 if (arg1 == 0) {
5237 ret = get_errno(acct(NULL));
5238 } else {
5239 if (!(p = lock_user_string(arg1)))
5240 goto efault;
5241 ret = get_errno(acct(path(p)));
5242 unlock_user(p, arg1, 0);
5243 }
5244 break;
5245 #ifdef TARGET_NR_umount2 /* not on alpha */
5246 case TARGET_NR_umount2:
5247 if (!(p = lock_user_string(arg1)))
5248 goto efault;
5249 ret = get_errno(umount2(p, arg2));
5250 unlock_user(p, arg1, 0);
5251 break;
5252 #endif
5253 #ifdef TARGET_NR_lock
5254 case TARGET_NR_lock:
5255 goto unimplemented;
5256 #endif
5257 case TARGET_NR_ioctl:
5258 ret = do_ioctl(arg1, arg2, arg3);
5259 break;
5260 case TARGET_NR_fcntl:
5261 ret = do_fcntl(arg1, arg2, arg3);
5262 break;
5263 #ifdef TARGET_NR_mpx
5264 case TARGET_NR_mpx:
5265 goto unimplemented;
5266 #endif
5267 case TARGET_NR_setpgid:
5268 ret = get_errno(setpgid(arg1, arg2));
5269 break;
5270 #ifdef TARGET_NR_ulimit
5271 case TARGET_NR_ulimit:
5272 goto unimplemented;
5273 #endif
5274 #ifdef TARGET_NR_oldolduname
5275 case TARGET_NR_oldolduname:
5276 goto unimplemented;
5277 #endif
5278 case TARGET_NR_umask:
5279 ret = get_errno(umask(arg1));
5280 break;
5281 case TARGET_NR_chroot:
5282 if (!(p = lock_user_string(arg1)))
5283 goto efault;
5284 ret = get_errno(chroot(p));
5285 unlock_user(p, arg1, 0);
5286 break;
5287 case TARGET_NR_ustat:
5288 goto unimplemented;
5289 case TARGET_NR_dup2:
5290 ret = get_errno(dup2(arg1, arg2));
5291 break;
5292 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5293 case TARGET_NR_dup3:
5294 ret = get_errno(dup3(arg1, arg2, arg3));
5295 break;
5296 #endif
5297 #ifdef TARGET_NR_getppid /* not on alpha */
5298 case TARGET_NR_getppid:
5299 ret = get_errno(getppid());
5300 break;
5301 #endif
5302 case TARGET_NR_getpgrp:
5303 ret = get_errno(getpgrp());
5304 break;
5305 case TARGET_NR_setsid:
5306 ret = get_errno(setsid());
5307 break;
5308 #ifdef TARGET_NR_sigaction
5309 case TARGET_NR_sigaction:
5310 {
5311 #if defined(TARGET_ALPHA)
5312 struct target_sigaction act, oact, *pact = 0;
5313 struct target_old_sigaction *old_act;
5314 if (arg2) {
5315 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5316 goto efault;
5317 act._sa_handler = old_act->_sa_handler;
5318 target_siginitset(&act.sa_mask, old_act->sa_mask);
5319 act.sa_flags = old_act->sa_flags;
5320 act.sa_restorer = 0;
5321 unlock_user_struct(old_act, arg2, 0);
5322 pact = &act;
5323 }
5324 ret = get_errno(do_sigaction(arg1, pact, &oact));
5325 if (!is_error(ret) && arg3) {
5326 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5327 goto efault;
5328 old_act->_sa_handler = oact._sa_handler;
5329 old_act->sa_mask = oact.sa_mask.sig[0];
5330 old_act->sa_flags = oact.sa_flags;
5331 unlock_user_struct(old_act, arg3, 1);
5332 }
5333 #elif defined(TARGET_MIPS)
5334 struct target_sigaction act, oact, *pact, *old_act;
5335
5336 if (arg2) {
5337 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5338 goto efault;
5339 act._sa_handler = old_act->_sa_handler;
5340 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5341 act.sa_flags = old_act->sa_flags;
5342 unlock_user_struct(old_act, arg2, 0);
5343 pact = &act;
5344 } else {
5345 pact = NULL;
5346 }
5347
5348 ret = get_errno(do_sigaction(arg1, pact, &oact));
5349
5350 if (!is_error(ret) && arg3) {
5351 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5352 goto efault;
5353 old_act->_sa_handler = oact._sa_handler;
5354 old_act->sa_flags = oact.sa_flags;
5355 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5356 old_act->sa_mask.sig[1] = 0;
5357 old_act->sa_mask.sig[2] = 0;
5358 old_act->sa_mask.sig[3] = 0;
5359 unlock_user_struct(old_act, arg3, 1);
5360 }
5361 #else
5362 struct target_old_sigaction *old_act;
5363 struct target_sigaction act, oact, *pact;
5364 if (arg2) {
5365 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5366 goto efault;
5367 act._sa_handler = old_act->_sa_handler;
5368 target_siginitset(&act.sa_mask, old_act->sa_mask);
5369 act.sa_flags = old_act->sa_flags;
5370 act.sa_restorer = old_act->sa_restorer;
5371 unlock_user_struct(old_act, arg2, 0);
5372 pact = &act;
5373 } else {
5374 pact = NULL;
5375 }
5376 ret = get_errno(do_sigaction(arg1, pact, &oact));
5377 if (!is_error(ret) && arg3) {
5378 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5379 goto efault;
5380 old_act->_sa_handler = oact._sa_handler;
5381 old_act->sa_mask = oact.sa_mask.sig[0];
5382 old_act->sa_flags = oact.sa_flags;
5383 old_act->sa_restorer = oact.sa_restorer;
5384 unlock_user_struct(old_act, arg3, 1);
5385 }
5386 #endif
5387 }
5388 break;
5389 #endif
5390 case TARGET_NR_rt_sigaction:
5391 {
5392 #if defined(TARGET_ALPHA)
5393 struct target_sigaction act, oact, *pact = 0;
5394 struct target_rt_sigaction *rt_act;
5395 /* ??? arg4 == sizeof(sigset_t). */
5396 if (arg2) {
5397 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5398 goto efault;
5399 act._sa_handler = rt_act->_sa_handler;
5400 act.sa_mask = rt_act->sa_mask;
5401 act.sa_flags = rt_act->sa_flags;
5402 act.sa_restorer = arg5;
5403 unlock_user_struct(rt_act, arg2, 0);
5404 pact = &act;
5405 }
5406 ret = get_errno(do_sigaction(arg1, pact, &oact));
5407 if (!is_error(ret) && arg3) {
5408 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5409 goto efault;
5410 rt_act->_sa_handler = oact._sa_handler;
5411 rt_act->sa_mask = oact.sa_mask;
5412 rt_act->sa_flags = oact.sa_flags;
5413 unlock_user_struct(rt_act, arg3, 1);
5414 }
5415 #else
5416 struct target_sigaction *act;
5417 struct target_sigaction *oact;
5418
5419 if (arg2) {
5420 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5421 goto efault;
5422 } else
5423 act = NULL;
5424 if (arg3) {
5425 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5426 ret = -TARGET_EFAULT;
5427 goto rt_sigaction_fail;
5428 }
5429 } else
5430 oact = NULL;
5431 ret = get_errno(do_sigaction(arg1, act, oact));
5432 rt_sigaction_fail:
5433 if (act)
5434 unlock_user_struct(act, arg2, 0);
5435 if (oact)
5436 unlock_user_struct(oact, arg3, 1);
5437 #endif
5438 }
5439 break;
5440 #ifdef TARGET_NR_sgetmask /* not on alpha */
5441 case TARGET_NR_sgetmask:
5442 {
5443 sigset_t cur_set;
5444 abi_ulong target_set;
5445 sigprocmask(0, NULL, &cur_set);
5446 host_to_target_old_sigset(&target_set, &cur_set);
5447 ret = target_set;
5448 }
5449 break;
5450 #endif
5451 #ifdef TARGET_NR_ssetmask /* not on alpha */
5452 case TARGET_NR_ssetmask:
5453 {
5454 sigset_t set, oset, cur_set;
5455 abi_ulong target_set = arg1;
5456 sigprocmask(0, NULL, &cur_set);
5457 target_to_host_old_sigset(&set, &target_set);
5458 sigorset(&set, &set, &cur_set);
5459 sigprocmask(SIG_SETMASK, &set, &oset);
5460 host_to_target_old_sigset(&target_set, &oset);
5461 ret = target_set;
5462 }
5463 break;
5464 #endif
5465 #ifdef TARGET_NR_sigprocmask
5466 case TARGET_NR_sigprocmask:
5467 {
5468 #if defined(TARGET_ALPHA)
5469 sigset_t set, oldset;
5470 abi_ulong mask;
5471 int how;
5472
5473 switch (arg1) {
5474 case TARGET_SIG_BLOCK:
5475 how = SIG_BLOCK;
5476 break;
5477 case TARGET_SIG_UNBLOCK:
5478 how = SIG_UNBLOCK;
5479 break;
5480 case TARGET_SIG_SETMASK:
5481 how = SIG_SETMASK;
5482 break;
5483 default:
5484 ret = -TARGET_EINVAL;
5485 goto fail;
5486 }
5487 mask = arg2;
5488 target_to_host_old_sigset(&set, &mask);
5489
5490 ret = get_errno(sigprocmask(how, &set, &oldset));
5491
5492 if (!is_error(ret)) {
5493 host_to_target_old_sigset(&mask, &oldset);
5494 ret = mask;
5495 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5496 }
5497 #else
5498 sigset_t set, oldset, *set_ptr;
5499 int how;
5500
5501 if (arg2) {
5502 switch (arg1) {
5503 case TARGET_SIG_BLOCK:
5504 how = SIG_BLOCK;
5505 break;
5506 case TARGET_SIG_UNBLOCK:
5507 how = SIG_UNBLOCK;
5508 break;
5509 case TARGET_SIG_SETMASK:
5510 how = SIG_SETMASK;
5511 break;
5512 default:
5513 ret = -TARGET_EINVAL;
5514 goto fail;
5515 }
5516 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5517 goto efault;
5518 target_to_host_old_sigset(&set, p);
5519 unlock_user(p, arg2, 0);
5520 set_ptr = &set;
5521 } else {
5522 how = 0;
5523 set_ptr = NULL;
5524 }
5525 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5526 if (!is_error(ret) && arg3) {
5527 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5528 goto efault;
5529 host_to_target_old_sigset(p, &oldset);
5530 unlock_user(p, arg3, sizeof(target_sigset_t));
5531 }
5532 #endif
5533 }
5534 break;
5535 #endif
5536 case TARGET_NR_rt_sigprocmask:
5537 {
5538 int how = arg1;
5539 sigset_t set, oldset, *set_ptr;
5540
5541 if (arg2) {
5542 switch(how) {
5543 case TARGET_SIG_BLOCK:
5544 how = SIG_BLOCK;
5545 break;
5546 case TARGET_SIG_UNBLOCK:
5547 how = SIG_UNBLOCK;
5548 break;
5549 case TARGET_SIG_SETMASK:
5550 how = SIG_SETMASK;
5551 break;
5552 default:
5553 ret = -TARGET_EINVAL;
5554 goto fail;
5555 }
5556 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5557 goto efault;
5558 target_to_host_sigset(&set, p);
5559 unlock_user(p, arg2, 0);
5560 set_ptr = &set;
5561 } else {
5562 how = 0;
5563 set_ptr = NULL;
5564 }
5565 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5566 if (!is_error(ret) && arg3) {
5567 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5568 goto efault;
5569 host_to_target_sigset(p, &oldset);
5570 unlock_user(p, arg3, sizeof(target_sigset_t));
5571 }
5572 }
5573 break;
5574 #ifdef TARGET_NR_sigpending
5575 case TARGET_NR_sigpending:
5576 {
5577 sigset_t set;
5578 ret = get_errno(sigpending(&set));
5579 if (!is_error(ret)) {
5580 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5581 goto efault;
5582 host_to_target_old_sigset(p, &set);
5583 unlock_user(p, arg1, sizeof(target_sigset_t));
5584 }
5585 }
5586 break;
5587 #endif
5588 case TARGET_NR_rt_sigpending:
5589 {
5590 sigset_t set;
5591 ret = get_errno(sigpending(&set));
5592 if (!is_error(ret)) {
5593 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5594 goto efault;
5595 host_to_target_sigset(p, &set);
5596 unlock_user(p, arg1, sizeof(target_sigset_t));
5597 }
5598 }
5599 break;
5600 #ifdef TARGET_NR_sigsuspend
5601 case TARGET_NR_sigsuspend:
5602 {
5603 sigset_t set;
5604 #if defined(TARGET_ALPHA)
5605 abi_ulong mask = arg1;
5606 target_to_host_old_sigset(&set, &mask);
5607 #else
5608 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5609 goto efault;
5610 target_to_host_old_sigset(&set, p);
5611 unlock_user(p, arg1, 0);
5612 #endif
5613 ret = get_errno(sigsuspend(&set));
5614 }
5615 break;
5616 #endif
5617 case TARGET_NR_rt_sigsuspend:
5618 {
5619 sigset_t set;
5620 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5621 goto efault;
5622 target_to_host_sigset(&set, p);
5623 unlock_user(p, arg1, 0);
5624 ret = get_errno(sigsuspend(&set));
5625 }
5626 break;
5627 case TARGET_NR_rt_sigtimedwait:
5628 {
5629 sigset_t set;
5630 struct timespec uts, *puts;
5631 siginfo_t uinfo;
5632
5633 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5634 goto efault;
5635 target_to_host_sigset(&set, p);
5636 unlock_user(p, arg1, 0);
5637 if (arg3) {
5638 puts = &uts;
5639 target_to_host_timespec(puts, arg3);
5640 } else {
5641 puts = NULL;
5642 }
5643 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5644 if (!is_error(ret) && arg2) {
5645 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5646 goto efault;
5647 host_to_target_siginfo(p, &uinfo);
5648 unlock_user(p, arg2, sizeof(target_siginfo_t));
5649 }
5650 }
5651 break;
5652 case TARGET_NR_rt_sigqueueinfo:
5653 {
5654 siginfo_t uinfo;
5655 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
5656 goto efault;
5657 target_to_host_siginfo(&uinfo, p);
5658 unlock_user(p, arg1, 0);
5659 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5660 }
5661 break;
5662 #ifdef TARGET_NR_sigreturn
5663 case TARGET_NR_sigreturn:
5664 /* NOTE: ret is eax, so no transcoding must be done */
5665 ret = do_sigreturn(cpu_env);
5666 break;
5667 #endif
5668 case TARGET_NR_rt_sigreturn:
5669 /* NOTE: ret is eax, so no transcoding must be done */
5670 ret = do_rt_sigreturn(cpu_env);
5671 break;
5672 case TARGET_NR_sethostname:
5673 if (!(p = lock_user_string(arg1)))
5674 goto efault;
5675 ret = get_errno(sethostname(p, arg2));
5676 unlock_user(p, arg1, 0);
5677 break;
5678 case TARGET_NR_setrlimit:
5679 {
5680 int resource = target_to_host_resource(arg1);
5681 struct target_rlimit *target_rlim;
5682 struct rlimit rlim;
5683 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5684 goto efault;
5685 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5686 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5687 unlock_user_struct(target_rlim, arg2, 0);
5688 ret = get_errno(setrlimit(resource, &rlim));
5689 }
5690 break;
5691 case TARGET_NR_getrlimit:
5692 {
5693 int resource = target_to_host_resource(arg1);
5694 struct target_rlimit *target_rlim;
5695 struct rlimit rlim;
5696
5697 ret = get_errno(getrlimit(resource, &rlim));
5698 if (!is_error(ret)) {
5699 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5700 goto efault;
5701 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5702 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5703 unlock_user_struct(target_rlim, arg2, 1);
5704 }
5705 }
5706 break;
5707 case TARGET_NR_getrusage:
5708 {
5709 struct rusage rusage;
5710 ret = get_errno(getrusage(arg1, &rusage));
5711 if (!is_error(ret)) {
5712 host_to_target_rusage(arg2, &rusage);
5713 }
5714 }
5715 break;
5716 case TARGET_NR_gettimeofday:
5717 {
5718 struct timeval tv;
5719 ret = get_errno(gettimeofday(&tv, NULL));
5720 if (!is_error(ret)) {
5721 if (copy_to_user_timeval(arg1, &tv))
5722 goto efault;
5723 }
5724 }
5725 break;
5726 case TARGET_NR_settimeofday:
5727 {
5728 struct timeval tv;
5729 if (copy_from_user_timeval(&tv, arg1))
5730 goto efault;
5731 ret = get_errno(settimeofday(&tv, NULL));
5732 }
5733 break;
5734 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5735 case TARGET_NR_select:
5736 {
5737 struct target_sel_arg_struct *sel;
5738 abi_ulong inp, outp, exp, tvp;
5739 long nsel;
5740
5741 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5742 goto efault;
5743 nsel = tswapal(sel->n);
5744 inp = tswapal(sel->inp);
5745 outp = tswapal(sel->outp);
5746 exp = tswapal(sel->exp);
5747 tvp = tswapal(sel->tvp);
5748 unlock_user_struct(sel, arg1, 0);
5749 ret = do_select(nsel, inp, outp, exp, tvp);
5750 }
5751 break;
5752 #endif
5753 #ifdef TARGET_NR_pselect6
5754 case TARGET_NR_pselect6:
5755 {
5756 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
5757 fd_set rfds, wfds, efds;
5758 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
5759 struct timespec ts, *ts_ptr;
5760
5761 /*
5762 * The 6th arg is actually two args smashed together,
5763 * so we cannot use the C library.
5764 */
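                 /* (glibc's pselect() takes the sigset directly; the raw
                  * syscall instead takes a pointer to a { const sigset_t *;
                  * size_t } pair as its sixth argument, which the struct
                  * below mirrors.) */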
5765 sigset_t set;
5766 struct {
5767 sigset_t *set;
5768 size_t size;
5769 } sig, *sig_ptr;
5770
5771 abi_ulong arg_sigset, arg_sigsize, *arg7;
5772 target_sigset_t *target_sigset;
5773
5774 n = arg1;
5775 rfd_addr = arg2;
5776 wfd_addr = arg3;
5777 efd_addr = arg4;
5778 ts_addr = arg5;
5779
5780 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
5781 if (ret) {
5782 goto fail;
5783 }
5784 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
5785 if (ret) {
5786 goto fail;
5787 }
5788 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
5789 if (ret) {
5790 goto fail;
5791 }
5792
5793 /*
5794 * This takes a timespec, and not a timeval, so we cannot
5795 * use the do_select() helper ...
5796 */
5797 if (ts_addr) {
5798 if (target_to_host_timespec(&ts, ts_addr)) {
5799 goto efault;
5800 }
5801 ts_ptr = &ts;
5802 } else {
5803 ts_ptr = NULL;
5804 }
5805
5806 /* Extract the two packed args for the sigset */
5807 if (arg6) {
5808 sig_ptr = &sig;
5809 sig.size = _NSIG / 8;
5810
5811 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
5812 if (!arg7) {
5813 goto efault;
5814 }
5815 arg_sigset = tswapal(arg7[0]);
5816 arg_sigsize = tswapal(arg7[1]);
5817 unlock_user(arg7, arg6, 0);
5818
5819 if (arg_sigset) {
5820 sig.set = &set;
5821 if (arg_sigsize != sizeof(*target_sigset)) {
5822 /* Like the kernel, we enforce correct size sigsets */
5823 ret = -TARGET_EINVAL;
5824 goto fail;
5825 }
5826 target_sigset = lock_user(VERIFY_READ, arg_sigset,
5827 sizeof(*target_sigset), 1);
5828 if (!target_sigset) {
5829 goto efault;
5830 }
5831 target_to_host_sigset(&set, target_sigset);
5832 unlock_user(target_sigset, arg_sigset, 0);
5833 } else {
5834 sig.set = NULL;
5835 }
5836 } else {
5837 sig_ptr = NULL;
5838 }
5839
5840 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
5841 ts_ptr, sig_ptr));
5842
5843 if (!is_error(ret)) {
5844 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
5845 goto efault;
5846 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
5847 goto efault;
5848 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
5849 goto efault;
5850
5851 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
5852 goto efault;
5853 }
5854 }
5855 break;
5856 #endif
5857 case TARGET_NR_symlink:
5858 {
5859 void *p2;
5860 p = lock_user_string(arg1);
5861 p2 = lock_user_string(arg2);
5862 if (!p || !p2)
5863 ret = -TARGET_EFAULT;
5864 else
5865 ret = get_errno(symlink(p, p2));
5866 unlock_user(p2, arg2, 0);
5867 unlock_user(p, arg1, 0);
5868 }
5869 break;
5870 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5871 case TARGET_NR_symlinkat:
5872 {
5873 void *p2;
5874 p = lock_user_string(arg1);
5875 p2 = lock_user_string(arg3);
5876 if (!p || !p2)
5877 ret = -TARGET_EFAULT;
5878 else
5879 ret = get_errno(sys_symlinkat(p, arg2, p2));
5880 unlock_user(p2, arg3, 0);
5881 unlock_user(p, arg1, 0);
5882 }
5883 break;
5884 #endif
5885 #ifdef TARGET_NR_oldlstat
5886 case TARGET_NR_oldlstat:
5887 goto unimplemented;
5888 #endif
5889 case TARGET_NR_readlink:
5890 {
5891 void *p2, *temp;
5892 p = lock_user_string(arg1);
5893 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5894 if (!p || !p2)
5895 ret = -TARGET_EFAULT;
5896 else {
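                /* /proc/self/exe is intercepted: report the path of the
                   program being emulated (exec_path) rather than the path
                   of the qemu binary itself. */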
5897 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5898 char real[PATH_MAX];
5899 temp = realpath(exec_path, real);
5900 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
5901 snprintf((char *)p2, arg3, "%s", real);
5902 }
5903 else
5904 ret = get_errno(readlink(path(p), p2, arg3));
5905 }
5906 unlock_user(p2, arg2, ret);
5907 unlock_user(p, arg1, 0);
5908 }
5909 break;
5910 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5911 case TARGET_NR_readlinkat:
5912 {
5913 void *p2;
5914 p = lock_user_string(arg2);
5915 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5916 if (!p || !p2)
5917 ret = -TARGET_EFAULT;
5918 else
5919 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5920 unlock_user(p2, arg3, ret);
5921 unlock_user(p, arg2, 0);
5922 }
5923 break;
5924 #endif
5925 #ifdef TARGET_NR_uselib
5926 case TARGET_NR_uselib:
5927 goto unimplemented;
5928 #endif
5929 #ifdef TARGET_NR_swapon
5930 case TARGET_NR_swapon:
5931 if (!(p = lock_user_string(arg1)))
5932 goto efault;
5933 ret = get_errno(swapon(p, arg2));
5934 unlock_user(p, arg1, 0);
5935 break;
5936 #endif
5937 case TARGET_NR_reboot:
5938 if (!(p = lock_user_string(arg4)))
5939 goto efault;
5940 ret = reboot(arg1, arg2, arg3, p);
5941 unlock_user(p, arg4, 0);
5942 break;
5943 #ifdef TARGET_NR_readdir
5944 case TARGET_NR_readdir:
5945 goto unimplemented;
5946 #endif
5947 #ifdef TARGET_NR_mmap
5948 case TARGET_NR_mmap:
5949 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5950 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5951 || defined(TARGET_S390X)
5952 {
5953 abi_ulong *v;
5954 abi_ulong v1, v2, v3, v4, v5, v6;
5955 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5956 goto efault;
5957 v1 = tswapal(v[0]);
5958 v2 = tswapal(v[1]);
5959 v3 = tswapal(v[2]);
5960 v4 = tswapal(v[3]);
5961 v5 = tswapal(v[4]);
5962 v6 = tswapal(v[5]);
5963 unlock_user(v, arg1, 0);
5964 ret = get_errno(target_mmap(v1, v2, v3,
5965 target_to_host_bitmask(v4, mmap_flags_tbl),
5966 v5, v6));
5967 }
5968 #else
5969 ret = get_errno(target_mmap(arg1, arg2, arg3,
5970 target_to_host_bitmask(arg4, mmap_flags_tbl),
5971 arg5,
5972 arg6));
5973 #endif
5974 break;
5975 #endif
5976 #ifdef TARGET_NR_mmap2
5977 case TARGET_NR_mmap2:
5978 #ifndef MMAP_SHIFT
5979 #define MMAP_SHIFT 12
5980 #endif
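     /* For mmap2 the file offset (arg6) is given in units of
        2^MMAP_SHIFT (4096-byte pages by default), so convert it back
        into a byte offset for target_mmap(). */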
5981 ret = get_errno(target_mmap(arg1, arg2, arg3,
5982 target_to_host_bitmask(arg4, mmap_flags_tbl),
5983 arg5,
5984 arg6 << MMAP_SHIFT));
5985 break;
5986 #endif
5987 case TARGET_NR_munmap:
5988 ret = get_errno(target_munmap(arg1, arg2));
5989 break;
5990 case TARGET_NR_mprotect:
5991 {
5992 TaskState *ts = ((CPUState *)cpu_env)->opaque;
5993 /* Special hack to detect libc making the stack executable. */
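             /* If libc requests PROT_GROWSDOWN for an address inside the
                guest stack, drop the flag and widen the range down to the
                recorded stack limit, so the whole emulated stack receives
                the new protection. */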
5994 if ((arg3 & PROT_GROWSDOWN)
5995 && arg1 >= ts->info->stack_limit
5996 && arg1 <= ts->info->start_stack) {
5997 arg3 &= ~PROT_GROWSDOWN;
5998 arg2 = arg2 + arg1 - ts->info->stack_limit;
5999 arg1 = ts->info->stack_limit;
6000 }
6001 }
6002 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6003 break;
6004 #ifdef TARGET_NR_mremap
6005 case TARGET_NR_mremap:
6006 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6007 break;
6008 #endif
6009 /* ??? msync/mlock/munlock are broken for softmmu. */
6010 #ifdef TARGET_NR_msync
6011 case TARGET_NR_msync:
6012 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6013 break;
6014 #endif
6015 #ifdef TARGET_NR_mlock
6016 case TARGET_NR_mlock:
6017 ret = get_errno(mlock(g2h(arg1), arg2));
6018 break;
6019 #endif
6020 #ifdef TARGET_NR_munlock
6021 case TARGET_NR_munlock:
6022 ret = get_errno(munlock(g2h(arg1), arg2));
6023 break;
6024 #endif
6025 #ifdef TARGET_NR_mlockall
6026 case TARGET_NR_mlockall:
6027 ret = get_errno(mlockall(arg1));
6028 break;
6029 #endif
6030 #ifdef TARGET_NR_munlockall
6031 case TARGET_NR_munlockall:
6032 ret = get_errno(munlockall());
6033 break;
6034 #endif
6035 case TARGET_NR_truncate:
6036 if (!(p = lock_user_string(arg1)))
6037 goto efault;
6038 ret = get_errno(truncate(p, arg2));
6039 unlock_user(p, arg1, 0);
6040 break;
6041 case TARGET_NR_ftruncate:
6042 ret = get_errno(ftruncate(arg1, arg2));
6043 break;
6044 case TARGET_NR_fchmod:
6045 ret = get_errno(fchmod(arg1, arg2));
6046 break;
6047 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6048 case TARGET_NR_fchmodat:
6049 if (!(p = lock_user_string(arg2)))
6050 goto efault;
6051 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6052 unlock_user(p, arg2, 0);
6053 break;
6054 #endif
6055 case TARGET_NR_getpriority:
6056 /* libc does special remapping of the return value of
6057 * sys_getpriority() so it's just easiest to call
6058 * sys_getpriority() directly rather than through libc. */
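        /* (The kernel returns the priority encoded as 20 - nice, i.e. a
           value in 1..40 that cannot be confused with a negative error
           code; glibc converts this back to the -20..19 range, which is
           the remapping avoided here.) */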
6059 ret = get_errno(sys_getpriority(arg1, arg2));
6060 break;
6061 case TARGET_NR_setpriority:
6062 ret = get_errno(setpriority(arg1, arg2, arg3));
6063 break;
6064 #ifdef TARGET_NR_profil
6065 case TARGET_NR_profil:
6066 goto unimplemented;
6067 #endif
6068 case TARGET_NR_statfs:
6069 if (!(p = lock_user_string(arg1)))
6070 goto efault;
6071 ret = get_errno(statfs(path(p), &stfs));
6072 unlock_user(p, arg1, 0);
6073 convert_statfs:
6074 if (!is_error(ret)) {
6075 struct target_statfs *target_stfs;
6076
6077 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6078 goto efault;
6079 __put_user(stfs.f_type, &target_stfs->f_type);
6080 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6081 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6082 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6083 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6084 __put_user(stfs.f_files, &target_stfs->f_files);
6085 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6086 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6087 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6088 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6089 unlock_user_struct(target_stfs, arg2, 1);
6090 }
6091 break;
6092 case TARGET_NR_fstatfs:
6093 ret = get_errno(fstatfs(arg1, &stfs));
6094 goto convert_statfs;
6095 #ifdef TARGET_NR_statfs64
6096 case TARGET_NR_statfs64:
6097 if (!(p = lock_user_string(arg1)))
6098 goto efault;
6099 ret = get_errno(statfs(path(p), &stfs));
6100 unlock_user(p, arg1, 0);
6101 convert_statfs64:
6102 if (!is_error(ret)) {
6103 struct target_statfs64 *target_stfs;
6104
6105 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6106 goto efault;
6107 __put_user(stfs.f_type, &target_stfs->f_type);
6108 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6109 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6110 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6111 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6112 __put_user(stfs.f_files, &target_stfs->f_files);
6113 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6114 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6115 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6116 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6117 unlock_user_struct(target_stfs, arg3, 1);
6118 }
6119 break;
6120 case TARGET_NR_fstatfs64:
6121 ret = get_errno(fstatfs(arg1, &stfs));
6122 goto convert_statfs64;
6123 #endif
6124 #ifdef TARGET_NR_ioperm
6125 case TARGET_NR_ioperm:
6126 goto unimplemented;
6127 #endif
6128 #ifdef TARGET_NR_socketcall
6129 case TARGET_NR_socketcall:
6130 ret = do_socketcall(arg1, arg2);
6131 break;
6132 #endif
6133 #ifdef TARGET_NR_accept
6134 case TARGET_NR_accept:
6135 ret = do_accept(arg1, arg2, arg3);
6136 break;
6137 #endif
6138 #ifdef TARGET_NR_bind
6139 case TARGET_NR_bind:
6140 ret = do_bind(arg1, arg2, arg3);
6141 break;
6142 #endif
6143 #ifdef TARGET_NR_connect
6144 case TARGET_NR_connect:
6145 ret = do_connect(arg1, arg2, arg3);
6146 break;
6147 #endif
6148 #ifdef TARGET_NR_getpeername
6149 case TARGET_NR_getpeername:
6150 ret = do_getpeername(arg1, arg2, arg3);
6151 break;
6152 #endif
6153 #ifdef TARGET_NR_getsockname
6154 case TARGET_NR_getsockname:
6155 ret = do_getsockname(arg1, arg2, arg3);
6156 break;
6157 #endif
6158 #ifdef TARGET_NR_getsockopt
6159 case TARGET_NR_getsockopt:
6160 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6161 break;
6162 #endif
6163 #ifdef TARGET_NR_listen
6164 case TARGET_NR_listen:
6165 ret = get_errno(listen(arg1, arg2));
6166 break;
6167 #endif
6168 #ifdef TARGET_NR_recv
6169 case TARGET_NR_recv:
6170 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6171 break;
6172 #endif
6173 #ifdef TARGET_NR_recvfrom
6174 case TARGET_NR_recvfrom:
6175 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6176 break;
6177 #endif
6178 #ifdef TARGET_NR_recvmsg
6179 case TARGET_NR_recvmsg:
6180 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6181 break;
6182 #endif
6183 #ifdef TARGET_NR_send
6184 case TARGET_NR_send:
6185 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6186 break;
6187 #endif
6188 #ifdef TARGET_NR_sendmsg
6189 case TARGET_NR_sendmsg:
6190 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6191 break;
6192 #endif
6193 #ifdef TARGET_NR_sendto
6194 case TARGET_NR_sendto:
6195 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6196 break;
6197 #endif
6198 #ifdef TARGET_NR_shutdown
6199 case TARGET_NR_shutdown:
6200 ret = get_errno(shutdown(arg1, arg2));
6201 break;
6202 #endif
6203 #ifdef TARGET_NR_socket
6204 case TARGET_NR_socket:
6205 ret = do_socket(arg1, arg2, arg3);
6206 break;
6207 #endif
6208 #ifdef TARGET_NR_socketpair
6209 case TARGET_NR_socketpair:
6210 ret = do_socketpair(arg1, arg2, arg3, arg4);
6211 break;
6212 #endif
6213 #ifdef TARGET_NR_setsockopt
6214 case TARGET_NR_setsockopt:
6215 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6216 break;
6217 #endif
6218
6219 case TARGET_NR_syslog:
6220 if (!(p = lock_user_string(arg2)))
6221 goto efault;
6222 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6223 unlock_user(p, arg2, 0);
6224 break;
6225
6226 case TARGET_NR_setitimer:
6227 {
6228 struct itimerval value, ovalue, *pvalue;
6229
6230 if (arg2) {
6231 pvalue = &value;
6232 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6233 || copy_from_user_timeval(&pvalue->it_value,
6234 arg2 + sizeof(struct target_timeval)))
6235 goto efault;
6236 } else {
6237 pvalue = NULL;
6238 }
6239 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6240 if (!is_error(ret) && arg3) {
6241 if (copy_to_user_timeval(arg3,
6242 &ovalue.it_interval)
6243 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6244 &ovalue.it_value))
6245 goto efault;
6246 }
6247 }
6248 break;
6249 case TARGET_NR_getitimer:
6250 {
6251 struct itimerval value;
6252
6253 ret = get_errno(getitimer(arg1, &value));
6254 if (!is_error(ret) && arg2) {
6255 if (copy_to_user_timeval(arg2,
6256 &value.it_interval)
6257 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6258 &value.it_value))
6259 goto efault;
6260 }
6261 }
6262 break;
6263 case TARGET_NR_stat:
6264 if (!(p = lock_user_string(arg1)))
6265 goto efault;
6266 ret = get_errno(stat(path(p), &st));
6267 unlock_user(p, arg1, 0);
6268 goto do_stat;
6269 case TARGET_NR_lstat:
6270 if (!(p = lock_user_string(arg1)))
6271 goto efault;
6272 ret = get_errno(lstat(path(p), &st));
6273 unlock_user(p, arg1, 0);
6274 goto do_stat;
6275 case TARGET_NR_fstat:
6276 {
6277 ret = get_errno(fstat(arg1, &st));
6278 do_stat:
6279 if (!is_error(ret)) {
6280 struct target_stat *target_st;
6281
6282 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6283 goto efault;
6284 memset(target_st, 0, sizeof(*target_st));
6285 __put_user(st.st_dev, &target_st->st_dev);
6286 __put_user(st.st_ino, &target_st->st_ino);
6287 __put_user(st.st_mode, &target_st->st_mode);
6288 __put_user(st.st_uid, &target_st->st_uid);
6289 __put_user(st.st_gid, &target_st->st_gid);
6290 __put_user(st.st_nlink, &target_st->st_nlink);
6291 __put_user(st.st_rdev, &target_st->st_rdev);
6292 __put_user(st.st_size, &target_st->st_size);
6293 __put_user(st.st_blksize, &target_st->st_blksize);
6294 __put_user(st.st_blocks, &target_st->st_blocks);
6295 __put_user(st.st_atime, &target_st->target_st_atime);
6296 __put_user(st.st_mtime, &target_st->target_st_mtime);
6297 __put_user(st.st_ctime, &target_st->target_st_ctime);
6298 unlock_user_struct(target_st, arg2, 1);
6299 }
6300 }
6301 break;
6302 #ifdef TARGET_NR_olduname
6303 case TARGET_NR_olduname:
6304 goto unimplemented;
6305 #endif
6306 #ifdef TARGET_NR_iopl
6307 case TARGET_NR_iopl:
6308 goto unimplemented;
6309 #endif
6310 case TARGET_NR_vhangup:
6311 ret = get_errno(vhangup());
6312 break;
6313 #ifdef TARGET_NR_idle
6314 case TARGET_NR_idle:
6315 goto unimplemented;
6316 #endif
6317 #ifdef TARGET_NR_syscall
6318 case TARGET_NR_syscall:
6319 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6320 arg6, arg7, arg8, 0);
6321 break;
6322 #endif
6323 case TARGET_NR_wait4:
6324 {
6325 int status;
6326 abi_long status_ptr = arg2;
6327 struct rusage rusage, *rusage_ptr;
6328 abi_ulong target_rusage = arg4;
6329 if (target_rusage)
6330 rusage_ptr = &rusage;
6331 else
6332 rusage_ptr = NULL;
6333 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6334 if (!is_error(ret)) {
6335 if (status_ptr) {
6336 status = host_to_target_waitstatus(status);
6337 if (put_user_s32(status, status_ptr))
6338 goto efault;
6339 }
6340 if (target_rusage)
6341 host_to_target_rusage(target_rusage, &rusage);
6342 }
6343 }
6344 break;
6345 #ifdef TARGET_NR_swapoff
6346 case TARGET_NR_swapoff:
6347 if (!(p = lock_user_string(arg1)))
6348 goto efault;
6349 ret = get_errno(swapoff(p));
6350 unlock_user(p, arg1, 0);
6351 break;
6352 #endif
6353 case TARGET_NR_sysinfo:
6354 {
6355 struct target_sysinfo *target_value;
6356 struct sysinfo value;
6357 ret = get_errno(sysinfo(&value));
6358 if (!is_error(ret) && arg1)
6359 {
6360 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6361 goto efault;
6362 __put_user(value.uptime, &target_value->uptime);
6363 __put_user(value.loads[0], &target_value->loads[0]);
6364 __put_user(value.loads[1], &target_value->loads[1]);
6365 __put_user(value.loads[2], &target_value->loads[2]);
6366 __put_user(value.totalram, &target_value->totalram);
6367 __put_user(value.freeram, &target_value->freeram);
6368 __put_user(value.sharedram, &target_value->sharedram);
6369 __put_user(value.bufferram, &target_value->bufferram);
6370 __put_user(value.totalswap, &target_value->totalswap);
6371 __put_user(value.freeswap, &target_value->freeswap);
6372 __put_user(value.procs, &target_value->procs);
6373 __put_user(value.totalhigh, &target_value->totalhigh);
6374 __put_user(value.freehigh, &target_value->freehigh);
6375 __put_user(value.mem_unit, &target_value->mem_unit);
6376 unlock_user_struct(target_value, arg1, 1);
6377 }
6378 }
6379 break;
6380 #ifdef TARGET_NR_ipc
6381 case TARGET_NR_ipc:
6382 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6383 break;
6384 #endif
6385 #ifdef TARGET_NR_semget
6386 case TARGET_NR_semget:
6387 ret = get_errno(semget(arg1, arg2, arg3));
6388 break;
6389 #endif
6390 #ifdef TARGET_NR_semop
6391 case TARGET_NR_semop:
6392 ret = get_errno(do_semop(arg1, arg2, arg3));
6393 break;
6394 #endif
6395 #ifdef TARGET_NR_semctl
6396 case TARGET_NR_semctl:
6397 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6398 break;
6399 #endif
6400 #ifdef TARGET_NR_msgctl
6401 case TARGET_NR_msgctl:
6402 ret = do_msgctl(arg1, arg2, arg3);
6403 break;
6404 #endif
6405 #ifdef TARGET_NR_msgget
6406 case TARGET_NR_msgget:
6407 ret = get_errno(msgget(arg1, arg2));
6408 break;
6409 #endif
6410 #ifdef TARGET_NR_msgrcv
6411 case TARGET_NR_msgrcv:
6412 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6413 break;
6414 #endif
6415 #ifdef TARGET_NR_msgsnd
6416 case TARGET_NR_msgsnd:
6417 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6418 break;
6419 #endif
6420 #ifdef TARGET_NR_shmget
6421 case TARGET_NR_shmget:
6422 ret = get_errno(shmget(arg1, arg2, arg3));
6423 break;
6424 #endif
6425 #ifdef TARGET_NR_shmctl
6426 case TARGET_NR_shmctl:
6427 ret = do_shmctl(arg1, arg2, arg3);
6428 break;
6429 #endif
6430 #ifdef TARGET_NR_shmat
6431 case TARGET_NR_shmat:
6432 ret = do_shmat(arg1, arg2, arg3);
6433 break;
6434 #endif
6435 #ifdef TARGET_NR_shmdt
6436 case TARGET_NR_shmdt:
6437 ret = do_shmdt(arg1);
6438 break;
6439 #endif
6440 case TARGET_NR_fsync:
6441 ret = get_errno(fsync(arg1));
6442 break;
6443 case TARGET_NR_clone:
6444 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6445 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6446 #elif defined(TARGET_CRIS)
6447 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6448 #elif defined(TARGET_S390X)
6449 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6450 #else
6451 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6452 #endif
6453 break;
6454 #ifdef __NR_exit_group
6455 /* new thread calls */
6456 case TARGET_NR_exit_group:
6457 #ifdef TARGET_GPROF
6458 _mcleanup();
6459 #endif
6460 gdb_exit(cpu_env, arg1);
6461 ret = get_errno(exit_group(arg1));
6462 break;
6463 #endif
6464 case TARGET_NR_setdomainname:
6465 if (!(p = lock_user_string(arg1)))
6466 goto efault;
6467 ret = get_errno(setdomainname(p, arg2));
6468 unlock_user(p, arg1, 0);
6469 break;
6470 case TARGET_NR_uname:
6471 /* no need to transcode because we use the linux syscall */
6472 {
6473 struct new_utsname * buf;
6474
6475 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6476 goto efault;
6477 ret = get_errno(sys_uname(buf));
6478 if (!is_error(ret)) {
6479 /* Overwrite the native machine name with whatever is being
6480 emulated. */
6481 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6482 /* Allow the user to override the reported release. */
6483 if (qemu_uname_release && *qemu_uname_release)
6484 strcpy (buf->release, qemu_uname_release);
6485 }
6486 unlock_user_struct(buf, arg1, 1);
6487 }
6488 break;
6489 #ifdef TARGET_I386
6490 case TARGET_NR_modify_ldt:
6491 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6492 break;
6493 #if !defined(TARGET_X86_64)
6494 case TARGET_NR_vm86old:
6495 goto unimplemented;
6496 case TARGET_NR_vm86:
6497 ret = do_vm86(cpu_env, arg1, arg2);
6498 break;
6499 #endif
6500 #endif
6501 case TARGET_NR_adjtimex:
6502 goto unimplemented;
6503 #ifdef TARGET_NR_create_module
6504 case TARGET_NR_create_module:
6505 #endif
6506 case TARGET_NR_init_module:
6507 case TARGET_NR_delete_module:
6508 #ifdef TARGET_NR_get_kernel_syms
6509 case TARGET_NR_get_kernel_syms:
6510 #endif
6511 goto unimplemented;
6512 case TARGET_NR_quotactl:
6513 goto unimplemented;
6514 case TARGET_NR_getpgid:
6515 ret = get_errno(getpgid(arg1));
6516 break;
6517 case TARGET_NR_fchdir:
6518 ret = get_errno(fchdir(arg1));
6519 break;
6520 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6521 case TARGET_NR_bdflush:
6522 goto unimplemented;
6523 #endif
6524 #ifdef TARGET_NR_sysfs
6525 case TARGET_NR_sysfs:
6526 goto unimplemented;
6527 #endif
6528 case TARGET_NR_personality:
6529 ret = get_errno(personality(arg1));
6530 break;
6531 #ifdef TARGET_NR_afs_syscall
6532 case TARGET_NR_afs_syscall:
6533 goto unimplemented;
6534 #endif
6535 #ifdef TARGET_NR__llseek /* Not on alpha */
6536 case TARGET_NR__llseek:
6537 {
6538 int64_t res;
6539 #if !defined(__NR_llseek)
6540 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6541 if (res == -1) {
6542 ret = get_errno(res);
6543 } else {
6544 ret = 0;
6545 }
6546 #else
6547 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6548 #endif
6549 if ((ret == 0) && put_user_s64(res, arg4)) {
6550 goto efault;
6551 }
6552 }
6553 break;
6554 #endif
6555 case TARGET_NR_getdents:
6556 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6557 {
6558 struct target_dirent *target_dirp;
6559 struct linux_dirent *dirp;
6560 abi_long count = arg3;
6561
6562 dirp = malloc(count);
6563 if (!dirp) {
6564 ret = -TARGET_ENOMEM;
6565 goto fail;
6566 }
6567
6568 ret = get_errno(sys_getdents(arg1, dirp, count));
6569 if (!is_error(ret)) {
6570 struct linux_dirent *de;
6571 struct target_dirent *tde;
6572 int len = ret;
6573 int reclen, treclen;
6574 int count1, tnamelen;
6575
6576 count1 = 0;
6577 de = dirp;
6578 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6579 goto efault;
6580 tde = target_dirp;
6581 while (len > 0) {
6582 reclen = de->d_reclen;
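                    /* The host linux_dirent stores d_ino and d_off as longs
                       while the target uses abi_long, so each record shrinks
                       by twice the size difference. */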
6583 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6584 tde->d_reclen = tswap16(treclen);
6585 tde->d_ino = tswapal(de->d_ino);
6586 tde->d_off = tswapal(de->d_off);
6587 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6588 if (tnamelen > 256)
6589 tnamelen = 256;
6590 /* XXX: may not be correct */
6591 pstrcpy(tde->d_name, tnamelen, de->d_name);
6592 de = (struct linux_dirent *)((char *)de + reclen);
6593 len -= reclen;
6594 tde = (struct target_dirent *)((char *)tde + treclen);
6595 count1 += treclen;
6596 }
6597 ret = count1;
6598 unlock_user(target_dirp, arg2, ret);
6599 }
6600 free(dirp);
6601 }
6602 #else
6603 {
6604 struct linux_dirent *dirp;
6605 abi_long count = arg3;
6606
6607 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6608 goto efault;
6609 ret = get_errno(sys_getdents(arg1, dirp, count));
6610 if (!is_error(ret)) {
6611 struct linux_dirent *de;
6612 int len = ret;
6613 int reclen;
6614 de = dirp;
6615 while (len > 0) {
6616 reclen = de->d_reclen;
6617 if (reclen > len)
6618 break;
6619 de->d_reclen = tswap16(reclen);
6620 tswapls(&de->d_ino);
6621 tswapls(&de->d_off);
6622 de = (struct linux_dirent *)((char *)de + reclen);
6623 len -= reclen;
6624 }
6625 }
6626 unlock_user(dirp, arg2, ret);
6627 }
6628 #endif
6629 break;
6630 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6631 case TARGET_NR_getdents64:
6632 {
6633 struct linux_dirent64 *dirp;
6634 abi_long count = arg3;
6635 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6636 goto efault;
6637 ret = get_errno(sys_getdents64(arg1, dirp, count));
6638 if (!is_error(ret)) {
6639 struct linux_dirent64 *de;
6640 int len = ret;
6641 int reclen;
6642 de = dirp;
6643 while (len > 0) {
6644 reclen = de->d_reclen;
6645 if (reclen > len)
6646 break;
6647 de->d_reclen = tswap16(reclen);
6648 tswap64s((uint64_t *)&de->d_ino);
6649 tswap64s((uint64_t *)&de->d_off);
6650 de = (struct linux_dirent64 *)((char *)de + reclen);
6651 len -= reclen;
6652 }
6653 }
6654 unlock_user(dirp, arg2, ret);
6655 }
6656 break;
6657 #endif /* TARGET_NR_getdents64 */
6658 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6659 #ifdef TARGET_S390X
6660 case TARGET_NR_select:
6661 #else
6662 case TARGET_NR__newselect:
6663 #endif
6664 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6665 break;
6666 #endif
6667 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6668 # ifdef TARGET_NR_poll
6669 case TARGET_NR_poll:
6670 # endif
6671 # ifdef TARGET_NR_ppoll
6672 case TARGET_NR_ppoll:
6673 # endif
6674 {
6675 struct target_pollfd *target_pfd;
6676 unsigned int nfds = arg2;
6677 int timeout = arg3;
6678 struct pollfd *pfd;
6679 unsigned int i;
6680
6681 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6682 if (!target_pfd)
6683 goto efault;
6684
6685 pfd = alloca(sizeof(struct pollfd) * nfds);
6686 for(i = 0; i < nfds; i++) {
6687 pfd[i].fd = tswap32(target_pfd[i].fd);
6688 pfd[i].events = tswap16(target_pfd[i].events);
6689 }
6690
6691 # ifdef TARGET_NR_ppoll
6692 if (num == TARGET_NR_ppoll) {
6693 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6694 target_sigset_t *target_set;
6695 sigset_t _set, *set = &_set;
6696
6697 if (arg3) {
6698 if (target_to_host_timespec(timeout_ts, arg3)) {
6699 unlock_user(target_pfd, arg1, 0);
6700 goto efault;
6701 }
6702 } else {
6703 timeout_ts = NULL;
6704 }
6705
6706 if (arg4) {
6707 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6708 if (!target_set) {
6709 unlock_user(target_pfd, arg1, 0);
6710 goto efault;
6711 }
6712 target_to_host_sigset(set, target_set);
6713 } else {
6714 set = NULL;
6715 }
6716
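            /* The final argument is the size in bytes of the signal mask,
               as the kernel's ppoll syscall expects. */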
6717 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6718
6719 if (!is_error(ret) && arg3) {
6720 host_to_target_timespec(arg3, timeout_ts);
6721 }
6722 if (arg4) {
6723 unlock_user(target_set, arg4, 0);
6724 }
6725 } else
6726 # endif
6727 ret = get_errno(poll(pfd, nfds, timeout));
6728
6729 if (!is_error(ret)) {
6730 for(i = 0; i < nfds; i++) {
6731 target_pfd[i].revents = tswap16(pfd[i].revents);
6732 }
6733 }
6734 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6735 }
6736 break;
6737 #endif
6738 case TARGET_NR_flock:
6739 /* NOTE: the flock constant seems to be the same for every
6740 Linux platform */
6741 ret = get_errno(flock(arg1, arg2));
6742 break;
6743 case TARGET_NR_readv:
6744 {
6745 int count = arg3;
6746 struct iovec *vec;
6747
6748 vec = alloca(count * sizeof(struct iovec));
6749 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6750 goto efault;
6751 ret = get_errno(readv(arg1, vec, count));
6752 unlock_iovec(vec, arg2, count, 1);
6753 }
6754 break;
6755 case TARGET_NR_writev:
6756 {
6757 int count = arg3;
6758 struct iovec *vec;
6759
6760 vec = alloca(count * sizeof(struct iovec));
6761 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6762 goto efault;
6763 ret = get_errno(writev(arg1, vec, count));
6764 unlock_iovec(vec, arg2, count, 0);
6765 }
6766 break;
6767 case TARGET_NR_getsid:
6768 ret = get_errno(getsid(arg1));
6769 break;
6770 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6771 case TARGET_NR_fdatasync:
6772 ret = get_errno(fdatasync(arg1));
6773 break;
6774 #endif
6775 case TARGET_NR__sysctl:
6776 /* We don't implement this, but ENOTDIR is always a safe
6777 return value. */
6778 ret = -TARGET_ENOTDIR;
6779 break;
6780 case TARGET_NR_sched_getaffinity:
6781 {
6782 unsigned int mask_size;
6783 unsigned long *mask;
6784
6785 /*
6786 * sched_getaffinity needs multiples of ulong, so we need to take
6787 * care of mismatches between target ulong and host ulong sizes.
6788 */
6789 if (arg2 & (sizeof(abi_ulong) - 1)) {
6790 ret = -TARGET_EINVAL;
6791 break;
6792 }
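        /* Round the byte count up to a whole number of host unsigned longs. */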
6793 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6794
6795 mask = alloca(mask_size);
6796 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6797
6798 if (!is_error(ret)) {
6799 if (copy_to_user(arg3, mask, ret)) {
6800 goto efault;
6801 }
6802 }
6803 }
6804 break;
6805 case TARGET_NR_sched_setaffinity:
6806 {
6807 unsigned int mask_size;
6808 unsigned long *mask;
6809
6810 /*
6811 * sched_setaffinity needs multiples of ulong, so we need to take
6812 * care of mismatches between target ulong and host ulong sizes.
6813 */
6814 if (arg2 & (sizeof(abi_ulong) - 1)) {
6815 ret = -TARGET_EINVAL;
6816 break;
6817 }
6818 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6819
6820 mask = alloca(mask_size);
6821 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6822 goto efault;
6823 }
6824 memcpy(mask, p, arg2);
6825 unlock_user_struct(p, arg3, 0);
6826
6827 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6828 }
6829 break;
6830 case TARGET_NR_sched_setparam:
6831 {
6832 struct sched_param *target_schp;
6833 struct sched_param schp;
6834
6835 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6836 goto efault;
6837 schp.sched_priority = tswap32(target_schp->sched_priority);
6838 unlock_user_struct(target_schp, arg2, 0);
6839 ret = get_errno(sched_setparam(arg1, &schp));
6840 }
6841 break;
6842 case TARGET_NR_sched_getparam:
6843 {
6844 struct sched_param *target_schp;
6845 struct sched_param schp;
6846 ret = get_errno(sched_getparam(arg1, &schp));
6847 if (!is_error(ret)) {
6848 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6849 goto efault;
6850 target_schp->sched_priority = tswap32(schp.sched_priority);
6851 unlock_user_struct(target_schp, arg2, 1);
6852 }
6853 }
6854 break;
6855 case TARGET_NR_sched_setscheduler:
6856 {
6857 struct sched_param *target_schp;
6858 struct sched_param schp;
6859 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6860 goto efault;
6861 schp.sched_priority = tswap32(target_schp->sched_priority);
6862 unlock_user_struct(target_schp, arg3, 0);
6863 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6864 }
6865 break;
6866 case TARGET_NR_sched_getscheduler:
6867 ret = get_errno(sched_getscheduler(arg1));
6868 break;
6869 case TARGET_NR_sched_yield:
6870 ret = get_errno(sched_yield());
6871 break;
6872 case TARGET_NR_sched_get_priority_max:
6873 ret = get_errno(sched_get_priority_max(arg1));
6874 break;
6875 case TARGET_NR_sched_get_priority_min:
6876 ret = get_errno(sched_get_priority_min(arg1));
6877 break;
6878 case TARGET_NR_sched_rr_get_interval:
6879 {
6880 struct timespec ts;
6881 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6882 if (!is_error(ret)) {
6883 host_to_target_timespec(arg2, &ts);
6884 }
6885 }
6886 break;
6887 case TARGET_NR_nanosleep:
6888 {
6889 struct timespec req, rem;
6890 target_to_host_timespec(&req, arg1);
6891 ret = get_errno(nanosleep(&req, &rem));
6892 if (is_error(ret) && arg2) {
6893 host_to_target_timespec(arg2, &rem);
6894 }
6895 }
6896 break;
6897 #ifdef TARGET_NR_query_module
6898 case TARGET_NR_query_module:
6899 goto unimplemented;
6900 #endif
6901 #ifdef TARGET_NR_nfsservctl
6902 case TARGET_NR_nfsservctl:
6903 goto unimplemented;
6904 #endif
6905 case TARGET_NR_prctl:
6906 switch (arg1)
6907 {
6908 case PR_GET_PDEATHSIG:
6909 {
6910 int deathsig;
6911 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6912 if (!is_error(ret) && arg2
6913 && put_user_ual(deathsig, arg2))
6914 goto efault;
6915 }
6916 break;
6917 default:
6918 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6919 break;
6920 }
6921 break;
6922 #ifdef TARGET_NR_arch_prctl
6923 case TARGET_NR_arch_prctl:
6924 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6925 ret = do_arch_prctl(cpu_env, arg1, arg2);
6926 break;
6927 #else
6928 goto unimplemented;
6929 #endif
6930 #endif
6931 #ifdef TARGET_NR_pread
6932 case TARGET_NR_pread:
6933 if (regpairs_aligned(cpu_env))
6934 arg4 = arg5;
6935 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6936 goto efault;
6937 ret = get_errno(pread(arg1, p, arg3, arg4));
6938 unlock_user(p, arg2, ret);
6939 break;
6940 case TARGET_NR_pwrite:
6941 if (regpairs_aligned(cpu_env))
6942 arg4 = arg5;
6943 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6944 goto efault;
6945 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6946 unlock_user(p, arg2, 0);
6947 break;
6948 #endif
6949 #ifdef TARGET_NR_pread64
6950 case TARGET_NR_pread64:
6951 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6952 goto efault;
6953 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6954 unlock_user(p, arg2, ret);
6955 break;
6956 case TARGET_NR_pwrite64:
6957 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6958 goto efault;
6959 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6960 unlock_user(p, arg2, 0);
6961 break;
6962 #endif
6963 case TARGET_NR_getcwd:
6964 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6965 goto efault;
6966 ret = get_errno(sys_getcwd1(p, arg2));
6967 unlock_user(p, arg1, ret);
6968 break;
6969 case TARGET_NR_capget:
6970 goto unimplemented;
6971 case TARGET_NR_capset:
6972 goto unimplemented;
6973 case TARGET_NR_sigaltstack:
6974 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6975 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6976 defined(TARGET_M68K) || defined(TARGET_S390X)
6977 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6978 break;
6979 #else
6980 goto unimplemented;
6981 #endif
6982 case TARGET_NR_sendfile:
6983 goto unimplemented;
6984 #ifdef TARGET_NR_getpmsg
6985 case TARGET_NR_getpmsg:
6986 goto unimplemented;
6987 #endif
6988 #ifdef TARGET_NR_putpmsg
6989 case TARGET_NR_putpmsg:
6990 goto unimplemented;
6991 #endif
6992 #ifdef TARGET_NR_vfork
6993 case TARGET_NR_vfork:
6994 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6995 0, 0, 0, 0));
6996 break;
6997 #endif
6998 #ifdef TARGET_NR_ugetrlimit
6999 case TARGET_NR_ugetrlimit:
7000 {
7001 struct rlimit rlim;
7002 int resource = target_to_host_resource(arg1);
7003 ret = get_errno(getrlimit(resource, &rlim));
7004 if (!is_error(ret)) {
7005 struct target_rlimit *target_rlim;
7006 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7007 goto efault;
7008 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7009 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7010 unlock_user_struct(target_rlim, arg2, 1);
7011 }
7012 break;
7013 }
7014 #endif
7015 #ifdef TARGET_NR_truncate64
7016 case TARGET_NR_truncate64:
7017 if (!(p = lock_user_string(arg1)))
7018 goto efault;
7019 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7020 unlock_user(p, arg1, 0);
7021 break;
7022 #endif
7023 #ifdef TARGET_NR_ftruncate64
7024 case TARGET_NR_ftruncate64:
7025 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7026 break;
7027 #endif
7028 #ifdef TARGET_NR_stat64
7029 case TARGET_NR_stat64:
7030 if (!(p = lock_user_string(arg1)))
7031 goto efault;
7032 ret = get_errno(stat(path(p), &st));
7033 unlock_user(p, arg1, 0);
7034 if (!is_error(ret))
7035 ret = host_to_target_stat64(cpu_env, arg2, &st);
7036 break;
7037 #endif
7038 #ifdef TARGET_NR_lstat64
7039 case TARGET_NR_lstat64:
7040 if (!(p = lock_user_string(arg1)))
7041 goto efault;
7042 ret = get_errno(lstat(path(p), &st));
7043 unlock_user(p, arg1, 0);
7044 if (!is_error(ret))
7045 ret = host_to_target_stat64(cpu_env, arg2, &st);
7046 break;
7047 #endif
7048 #ifdef TARGET_NR_fstat64
7049 case TARGET_NR_fstat64:
7050 ret = get_errno(fstat(arg1, &st));
7051 if (!is_error(ret))
7052 ret = host_to_target_stat64(cpu_env, arg2, &st);
7053 break;
7054 #endif
7055 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7056 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7057 #ifdef TARGET_NR_fstatat64
7058 case TARGET_NR_fstatat64:
7059 #endif
7060 #ifdef TARGET_NR_newfstatat
7061 case TARGET_NR_newfstatat:
7062 #endif
7063 if (!(p = lock_user_string(arg2)))
7064 goto efault;
7065 #ifdef __NR_fstatat64
7066 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7067 #else
7068 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7069 #endif
7070 if (!is_error(ret))
7071 ret = host_to_target_stat64(cpu_env, arg3, &st);
7072 break;
7073 #endif
7074 case TARGET_NR_lchown:
7075 if (!(p = lock_user_string(arg1)))
7076 goto efault;
7077 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7078 unlock_user(p, arg1, 0);
7079 break;
7080 #ifdef TARGET_NR_getuid
7081 case TARGET_NR_getuid:
7082 ret = get_errno(high2lowuid(getuid()));
7083 break;
7084 #endif
7085 #ifdef TARGET_NR_getgid
7086 case TARGET_NR_getgid:
7087 ret = get_errno(high2lowgid(getgid()));
7088 break;
7089 #endif
7090 #ifdef TARGET_NR_geteuid
7091 case TARGET_NR_geteuid:
7092 ret = get_errno(high2lowuid(geteuid()));
7093 break;
7094 #endif
7095 #ifdef TARGET_NR_getegid
7096 case TARGET_NR_getegid:
7097 ret = get_errno(high2lowgid(getegid()));
7098 break;
7099 #endif
7100 case TARGET_NR_setreuid:
7101 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7102 break;
7103 case TARGET_NR_setregid:
7104 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7105 break;
7106 case TARGET_NR_getgroups:
7107 {
7108 int gidsetsize = arg1;
7109 target_id *target_grouplist;
7110 gid_t *grouplist;
7111 int i;
7112
7113 grouplist = alloca(gidsetsize * sizeof(gid_t));
7114 ret = get_errno(getgroups(gidsetsize, grouplist));
7115 if (gidsetsize == 0)
7116 break;
7117 if (!is_error(ret)) {
7118 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7119 if (!target_grouplist)
7120 goto efault;
7121 for(i = 0;i < ret; i++)
7122 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7123 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7124 }
7125 }
7126 break;
7127 case TARGET_NR_setgroups:
7128 {
7129 int gidsetsize = arg1;
7130 target_id *target_grouplist;
7131 gid_t *grouplist;
7132 int i;
7133
7134 grouplist = alloca(gidsetsize * sizeof(gid_t));
7135 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7136 if (!target_grouplist) {
7137 ret = -TARGET_EFAULT;
7138 goto fail;
7139 }
7140 for(i = 0;i < gidsetsize; i++)
7141 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7142 unlock_user(target_grouplist, arg2, 0);
7143 ret = get_errno(setgroups(gidsetsize, grouplist));
7144 }
7145 break;
7146 case TARGET_NR_fchown:
7147 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7148 break;
7149 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7150 case TARGET_NR_fchownat:
7151 if (!(p = lock_user_string(arg2)))
7152 goto efault;
7153 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7154 unlock_user(p, arg2, 0);
7155 break;
7156 #endif
7157 #ifdef TARGET_NR_setresuid
7158 case TARGET_NR_setresuid:
7159 ret = get_errno(setresuid(low2highuid(arg1),
7160 low2highuid(arg2),
7161 low2highuid(arg3)));
7162 break;
7163 #endif
7164 #ifdef TARGET_NR_getresuid
7165 case TARGET_NR_getresuid:
7166 {
7167 uid_t ruid, euid, suid;
7168 ret = get_errno(getresuid(&ruid, &euid, &suid));
7169 if (!is_error(ret)) {
7170 if (put_user_u16(high2lowuid(ruid), arg1)
7171 || put_user_u16(high2lowuid(euid), arg2)
7172 || put_user_u16(high2lowuid(suid), arg3))
7173 goto efault;
7174 }
7175 }
7176 break;
7177 #endif
7178 #ifdef TARGET_NR_getresgid
7179 case TARGET_NR_setresgid:
7180 ret = get_errno(setresgid(low2highgid(arg1),
7181 low2highgid(arg2),
7182 low2highgid(arg3)));
7183 break;
7184 #endif
7185 #ifdef TARGET_NR_getresgid
7186 case TARGET_NR_getresgid:
7187 {
7188 gid_t rgid, egid, sgid;
7189 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7190 if (!is_error(ret)) {
7191 if (put_user_u16(high2lowgid(rgid), arg1)
7192 || put_user_u16(high2lowgid(egid), arg2)
7193 || put_user_u16(high2lowgid(sgid), arg3))
7194 goto efault;
7195 }
7196 }
7197 break;
7198 #endif
7199 case TARGET_NR_chown:
7200 if (!(p = lock_user_string(arg1)))
7201 goto efault;
7202 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7203 unlock_user(p, arg1, 0);
7204 break;
7205 case TARGET_NR_setuid:
7206 ret = get_errno(setuid(low2highuid(arg1)));
7207 break;
7208 case TARGET_NR_setgid:
7209 ret = get_errno(setgid(low2highgid(arg1)));
7210 break;
7211 case TARGET_NR_setfsuid:
7212 ret = get_errno(setfsuid(arg1));
7213 break;
7214 case TARGET_NR_setfsgid:
7215 ret = get_errno(setfsgid(arg1));
7216 break;
7217
7218 #ifdef TARGET_NR_lchown32
7219 case TARGET_NR_lchown32:
7220 if (!(p = lock_user_string(arg1)))
7221 goto efault;
7222 ret = get_errno(lchown(p, arg2, arg3));
7223 unlock_user(p, arg1, 0);
7224 break;
7225 #endif
7226 #ifdef TARGET_NR_getuid32
7227 case TARGET_NR_getuid32:
7228 ret = get_errno(getuid());
7229 break;
7230 #endif
7231
7232 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7233 /* Alpha specific */
7234 case TARGET_NR_getxuid:
7235 {
7236 uid_t euid;
7237 euid=geteuid();
7238 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7239 }
7240 ret = get_errno(getuid());
7241 break;
7242 #endif
7243 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7244 /* Alpha specific */
7245 case TARGET_NR_getxgid:
7246 {
7247 gid_t egid;
7248 egid=getegid();
7249 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7250 }
7251 ret = get_errno(getgid());
7252 break;
7253 #endif
7254 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7255 /* Alpha specific */
7256 case TARGET_NR_osf_getsysinfo:
7257 ret = -TARGET_EOPNOTSUPP;
7258 switch (arg1) {
7259 case TARGET_GSI_IEEE_FP_CONTROL:
7260 {
7261 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7262
7263 /* Copied from linux ieee_fpcr_to_swcr. */
7264 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7265 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7266 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7267 | SWCR_TRAP_ENABLE_DZE
7268 | SWCR_TRAP_ENABLE_OVF);
7269 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7270 | SWCR_TRAP_ENABLE_INE);
7271 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7272 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7273
7274 if (put_user_u64 (swcr, arg2))
7275 goto efault;
7276 ret = 0;
7277 }
7278 break;
7279
7280 /* case GSI_IEEE_STATE_AT_SIGNAL:
7281 -- Not implemented in linux kernel.
7282 case GSI_UACPROC:
7283 -- Retrieves current unaligned access state; not much used.
7284 case GSI_PROC_TYPE:
7285 -- Retrieves implver information; surely not used.
7286 case GSI_GET_HWRPB:
7287 -- Grabs a copy of the HWRPB; surely not used.
7288 */
7289 }
7290 break;
7291 #endif
7292 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7293 /* Alpha specific */
7294 case TARGET_NR_osf_setsysinfo:
7295 ret = -TARGET_EOPNOTSUPP;
7296 switch (arg1) {
7297 case TARGET_SSI_IEEE_FP_CONTROL:
7298 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7299 {
7300 uint64_t swcr, fpcr, orig_fpcr;
7301
7302 if (get_user_u64 (swcr, arg2))
7303 goto efault;
7304 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7305 fpcr = orig_fpcr & FPCR_DYN_MASK;
7306
7307 /* Copied from linux ieee_swcr_to_fpcr. */
7308 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7309 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7310 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7311 | SWCR_TRAP_ENABLE_DZE
7312 | SWCR_TRAP_ENABLE_OVF)) << 48;
7313 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7314 | SWCR_TRAP_ENABLE_INE)) << 57;
7315 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7316 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7317
7318 cpu_alpha_store_fpcr (cpu_env, fpcr);
7319 ret = 0;
7320
7321 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7322 /* Old exceptions are not signaled. */
7323 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7324
7325 /* If any exceptions are set by this call and are unmasked,
7326 send a signal. */
7327 /* ??? FIXME */
7328 }
7329 }
7330 break;
7331
7332 /* case SSI_NVPAIRS:
7333 -- Used with SSIN_UACPROC to enable unaligned accesses.
7334 case SSI_IEEE_STATE_AT_SIGNAL:
7335 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7336 -- Not implemented in linux kernel
7337 */
7338 }
7339 break;
7340 #endif
7341 #ifdef TARGET_NR_osf_sigprocmask
7342 /* Alpha specific. */
7343 case TARGET_NR_osf_sigprocmask:
7344 {
7345 abi_ulong mask;
7346 int how;
7347 sigset_t set, oldset;
7348
7349 switch(arg1) {
7350 case TARGET_SIG_BLOCK:
7351 how = SIG_BLOCK;
7352 break;
7353 case TARGET_SIG_UNBLOCK:
7354 how = SIG_UNBLOCK;
7355 break;
7356 case TARGET_SIG_SETMASK:
7357 how = SIG_SETMASK;
7358 break;
7359 default:
7360 ret = -TARGET_EINVAL;
7361 goto fail;
7362 }
7363 mask = arg2;
7364 target_to_host_old_sigset(&set, &mask);
7365 sigprocmask(how, &set, &oldset);
7366 host_to_target_old_sigset(&mask, &oldset);
7367 ret = mask;
7368 }
7369 break;
7370 #endif
7371
7372 #ifdef TARGET_NR_getgid32
7373 case TARGET_NR_getgid32:
7374 ret = get_errno(getgid());
7375 break;
7376 #endif
7377 #ifdef TARGET_NR_geteuid32
7378 case TARGET_NR_geteuid32:
7379 ret = get_errno(geteuid());
7380 break;
7381 #endif
7382 #ifdef TARGET_NR_getegid32
7383 case TARGET_NR_getegid32:
7384 ret = get_errno(getegid());
7385 break;
7386 #endif
7387 #ifdef TARGET_NR_setreuid32
7388 case TARGET_NR_setreuid32:
7389 ret = get_errno(setreuid(arg1, arg2));
7390 break;
7391 #endif
7392 #ifdef TARGET_NR_setregid32
7393 case TARGET_NR_setregid32:
7394 ret = get_errno(setregid(arg1, arg2));
7395 break;
7396 #endif
7397 #ifdef TARGET_NR_getgroups32
7398 case TARGET_NR_getgroups32:
7399 {
7400 int gidsetsize = arg1;
7401 uint32_t *target_grouplist;
7402 gid_t *grouplist;
7403 int i;
7404
7405 grouplist = alloca(gidsetsize * sizeof(gid_t));
7406 ret = get_errno(getgroups(gidsetsize, grouplist));
7407 if (gidsetsize == 0)
7408 break;
7409 if (!is_error(ret)) {
7410 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7411 if (!target_grouplist) {
7412 ret = -TARGET_EFAULT;
7413 goto fail;
7414 }
7415 for(i = 0;i < ret; i++)
7416 target_grouplist[i] = tswap32(grouplist[i]);
7417 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7418 }
7419 }
7420 break;
7421 #endif
7422 #ifdef TARGET_NR_setgroups32
7423 case TARGET_NR_setgroups32:
7424 {
7425 int gidsetsize = arg1;
7426 uint32_t *target_grouplist;
7427 gid_t *grouplist;
7428 int i;
7429
7430 grouplist = alloca(gidsetsize * sizeof(gid_t));
7431 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7432 if (!target_grouplist) {
7433 ret = -TARGET_EFAULT;
7434 goto fail;
7435 }
7436 for(i = 0;i < gidsetsize; i++)
7437 grouplist[i] = tswap32(target_grouplist[i]);
7438 unlock_user(target_grouplist, arg2, 0);
7439 ret = get_errno(setgroups(gidsetsize, grouplist));
7440 }
7441 break;
7442 #endif
7443 #ifdef TARGET_NR_fchown32
7444 case TARGET_NR_fchown32:
7445 ret = get_errno(fchown(arg1, arg2, arg3));
7446 break;
7447 #endif
7448 #ifdef TARGET_NR_setresuid32
7449 case TARGET_NR_setresuid32:
7450 ret = get_errno(setresuid(arg1, arg2, arg3));
7451 break;
7452 #endif
7453 #ifdef TARGET_NR_getresuid32
7454 case TARGET_NR_getresuid32:
7455 {
7456 uid_t ruid, euid, suid;
7457 ret = get_errno(getresuid(&ruid, &euid, &suid));
7458 if (!is_error(ret)) {
7459 if (put_user_u32(ruid, arg1)
7460 || put_user_u32(euid, arg2)
7461 || put_user_u32(suid, arg3))
7462 goto efault;
7463 }
7464 }
7465 break;
7466 #endif
7467 #ifdef TARGET_NR_setresgid32
7468 case TARGET_NR_setresgid32:
7469 ret = get_errno(setresgid(arg1, arg2, arg3));
7470 break;
7471 #endif
7472 #ifdef TARGET_NR_getresgid32
7473 case TARGET_NR_getresgid32:
7474 {
7475 gid_t rgid, egid, sgid;
7476 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7477 if (!is_error(ret)) {
7478 if (put_user_u32(rgid, arg1)
7479 || put_user_u32(egid, arg2)
7480 || put_user_u32(sgid, arg3))
7481 goto efault;
7482 }
7483 }
7484 break;
7485 #endif
7486 #ifdef TARGET_NR_chown32
7487 case TARGET_NR_chown32:
7488 if (!(p = lock_user_string(arg1)))
7489 goto efault;
7490 ret = get_errno(chown(p, arg2, arg3));
7491 unlock_user(p, arg1, 0);
7492 break;
7493 #endif
7494 #ifdef TARGET_NR_setuid32
7495 case TARGET_NR_setuid32:
7496 ret = get_errno(setuid(arg1));
7497 break;
7498 #endif
7499 #ifdef TARGET_NR_setgid32
7500 case TARGET_NR_setgid32:
7501 ret = get_errno(setgid(arg1));
7502 break;
7503 #endif
7504 #ifdef TARGET_NR_setfsuid32
7505 case TARGET_NR_setfsuid32:
7506 ret = get_errno(setfsuid(arg1));
7507 break;
7508 #endif
7509 #ifdef TARGET_NR_setfsgid32
7510 case TARGET_NR_setfsgid32:
7511 ret = get_errno(setfsgid(arg1));
7512 break;
7513 #endif
7514
7515 case TARGET_NR_pivot_root:
7516 goto unimplemented;
7517 #ifdef TARGET_NR_mincore
7518 case TARGET_NR_mincore:
7519 {
7520 void *a;
7521 ret = -TARGET_EFAULT;
7522 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7523 goto efault;
7524 if (!(p = lock_user_string(arg3)))
7525 goto mincore_fail;
7526 ret = get_errno(mincore(a, arg2, p));
7527 unlock_user(p, arg3, ret);
7528 mincore_fail:
7529 unlock_user(a, arg1, 0);
7530 }
7531 break;
7532 #endif
7533 #ifdef TARGET_NR_arm_fadvise64_64
7534 case TARGET_NR_arm_fadvise64_64:
7535 {
7536 /*
7537 * arm_fadvise64_64 looks like fadvise64_64 but
7538 * with different argument order
7539 */
7540 abi_long temp;
7541 temp = arg3;
7542 arg3 = arg4;
7543 arg4 = temp;
7544 }
7545 #endif
7546 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7547 #ifdef TARGET_NR_fadvise64_64
7548 case TARGET_NR_fadvise64_64:
7549 #endif
7550 #ifdef TARGET_NR_fadvise64
7551 case TARGET_NR_fadvise64:
7552 #endif
7553 #ifdef TARGET_S390X
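    /* On s390 guests the POSIX_FADV_DONTNEED/NOREUSE advice values are
       reportedly 6 and 7 rather than the generic 4 and 5, so remap them;
       guest values 4 and 5 are mapped to values the host should reject
       as invalid. */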
7554 switch (arg4) {
7555 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7556 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7557 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7558 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7559 default: break;
7560 }
7561 #endif
7562 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7563 break;
7564 #endif
7565 #ifdef TARGET_NR_madvise
7566 case TARGET_NR_madvise:
7567 /* A straight passthrough may not be safe because qemu sometimes
7568 turns private file-backed mappings into anonymous mappings.
7569 This will break MADV_DONTNEED.
7570 This is a hint, so ignoring and returning success is ok. */
7571 ret = get_errno(0);
7572 break;
7573 #endif
7574 #if TARGET_ABI_BITS == 32
7575 case TARGET_NR_fcntl64:
7576 {
7577 int cmd;
7578 struct flock64 fl;
7579 struct target_flock64 *target_fl;
7580 #ifdef TARGET_ARM
7581 struct target_eabi_flock64 *target_efl;
7582 #endif
7583
7584 cmd = target_to_host_fcntl_cmd(arg2);
7585 if (cmd == -TARGET_EINVAL) {
7586 ret = cmd;
7587 break;
7588 }
7589
7590 switch(arg2) {
7591 case TARGET_F_GETLK64:
7592 #ifdef TARGET_ARM
7593 if (((CPUARMState *)cpu_env)->eabi) {
7594 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7595 goto efault;
7596 fl.l_type = tswap16(target_efl->l_type);
7597 fl.l_whence = tswap16(target_efl->l_whence);
7598 fl.l_start = tswap64(target_efl->l_start);
7599 fl.l_len = tswap64(target_efl->l_len);
7600 fl.l_pid = tswap32(target_efl->l_pid);
7601 unlock_user_struct(target_efl, arg3, 0);
7602 } else
7603 #endif
7604 {
7605 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7606 goto efault;
7607 fl.l_type = tswap16(target_fl->l_type);
7608 fl.l_whence = tswap16(target_fl->l_whence);
7609 fl.l_start = tswap64(target_fl->l_start);
7610 fl.l_len = tswap64(target_fl->l_len);
7611 fl.l_pid = tswap32(target_fl->l_pid);
7612 unlock_user_struct(target_fl, arg3, 0);
7613 }
7614 ret = get_errno(fcntl(arg1, cmd, &fl));
7615 if (ret == 0) {
7616 #ifdef TARGET_ARM
7617 if (((CPUARMState *)cpu_env)->eabi) {
7618 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7619 goto efault;
7620 target_efl->l_type = tswap16(fl.l_type);
7621 target_efl->l_whence = tswap16(fl.l_whence);
7622 target_efl->l_start = tswap64(fl.l_start);
7623 target_efl->l_len = tswap64(fl.l_len);
7624 target_efl->l_pid = tswap32(fl.l_pid);
7625 unlock_user_struct(target_efl, arg3, 1);
7626 } else
7627 #endif
7628 {
7629 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7630 goto efault;
7631 target_fl->l_type = tswap16(fl.l_type);
7632 target_fl->l_whence = tswap16(fl.l_whence);
7633 target_fl->l_start = tswap64(fl.l_start);
7634 target_fl->l_len = tswap64(fl.l_len);
7635 target_fl->l_pid = tswap32(fl.l_pid);
7636 unlock_user_struct(target_fl, arg3, 1);
7637 }
7638 }
7639 break;
7640
7641 case TARGET_F_SETLK64:
7642 case TARGET_F_SETLKW64:
7643 #ifdef TARGET_ARM
7644 if (((CPUARMState *)cpu_env)->eabi) {
7645 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7646 goto efault;
7647 fl.l_type = tswap16(target_efl->l_type);
7648 fl.l_whence = tswap16(target_efl->l_whence);
7649 fl.l_start = tswap64(target_efl->l_start);
7650 fl.l_len = tswap64(target_efl->l_len);
7651 fl.l_pid = tswap32(target_efl->l_pid);
7652 unlock_user_struct(target_efl, arg3, 0);
7653 } else
7654 #endif
7655 {
7656 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7657 goto efault;
7658 fl.l_type = tswap16(target_fl->l_type);
7659 fl.l_whence = tswap16(target_fl->l_whence);
7660 fl.l_start = tswap64(target_fl->l_start);
7661 fl.l_len = tswap64(target_fl->l_len);
7662 fl.l_pid = tswap32(target_fl->l_pid);
7663 unlock_user_struct(target_fl, arg3, 0);
7664 }
7665 ret = get_errno(fcntl(arg1, cmd, &fl));
7666 break;
7667 default:
7668 ret = do_fcntl(arg1, arg2, arg3);
7669 break;
7670 }
7671 break;
7672 }
7673 #endif
7674 #ifdef TARGET_NR_cacheflush
7675 case TARGET_NR_cacheflush:
7676 /* self-modifying code is handled automatically, so nothing needed */
7677 ret = 0;
7678 break;
7679 #endif
7680 #ifdef TARGET_NR_security
7681 case TARGET_NR_security:
7682 goto unimplemented;
7683 #endif
7684 #ifdef TARGET_NR_getpagesize
7685 case TARGET_NR_getpagesize:
7686 ret = TARGET_PAGE_SIZE;
7687 break;
7688 #endif
7689 case TARGET_NR_gettid:
7690 ret = get_errno(gettid());
7691 break;
7692 #ifdef TARGET_NR_readahead
7693 case TARGET_NR_readahead:
7694 #if TARGET_ABI_BITS == 32
7695 if (regpairs_aligned(cpu_env)) {
7696 arg2 = arg3;
7697 arg3 = arg4;
7698 arg4 = arg5;
7699 }
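    /* On 32-bit ABIs the 64-bit offset is split across two registers:
       low half in arg2, high half in arg3. */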
7700 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7701 #else
7702 ret = get_errno(readahead(arg1, arg2, arg3));
7703 #endif
7704 break;
7705 #endif
7706 #ifdef CONFIG_ATTR
7707 #ifdef TARGET_NR_setxattr
7708 case TARGET_NR_lsetxattr:
7709 case TARGET_NR_fsetxattr:
7710 case TARGET_NR_lgetxattr:
7711 case TARGET_NR_fgetxattr:
7712 case TARGET_NR_listxattr:
7713 case TARGET_NR_llistxattr:
7714 case TARGET_NR_flistxattr:
7715 case TARGET_NR_lremovexattr:
7716 case TARGET_NR_fremovexattr:
7717 ret = -TARGET_EOPNOTSUPP;
7718 break;
7719 case TARGET_NR_setxattr:
7720 {
7721 void *p, *n, *v;
7722 p = lock_user_string(arg1);
7723 n = lock_user_string(arg2);
7724 v = lock_user(VERIFY_READ, arg3, arg4, 1);
7725 if (p && n && v) {
7726 ret = get_errno(setxattr(p, n, v, arg4, arg5));
7727 } else {
7728 ret = -TARGET_EFAULT;
7729 }
7730 unlock_user(p, arg1, 0);
7731 unlock_user(n, arg2, 0);
7732 unlock_user(v, arg3, 0);
7733 }
7734 break;
7735 case TARGET_NR_getxattr:
7736 {
7737 void *p, *n, *v;
7738 p = lock_user_string(arg1);
7739 n = lock_user_string(arg2);
7740 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7741 if (p && n && v) {
7742 ret = get_errno(getxattr(p, n, v, arg4));
7743 } else {
7744 ret = -TARGET_EFAULT;
7745 }
7746 unlock_user(p, arg1, 0);
7747 unlock_user(n, arg2, 0);
7748 unlock_user(v, arg3, arg4);
7749 }
7750 break;
7751 case TARGET_NR_removexattr:
7752 {
7753 void *p, *n;
7754 p = lock_user_string(arg1);
7755 n = lock_user_string(arg2);
7756 if (p && n) {
7757 ret = get_errno(removexattr(p, n));
7758 } else {
7759 ret = -TARGET_EFAULT;
7760 }
7761 unlock_user(p, arg1, 0);
7762 unlock_user(n, arg2, 0);
7763 }
7764 break;
7765 #endif
7766 #endif /* CONFIG_ATTR */
7767 #ifdef TARGET_NR_set_thread_area
7768 case TARGET_NR_set_thread_area:
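/* Each target keeps its TLS pointer in a different place: MIPS in tls_value, CRIS in the PR_PID special register, and 32-bit x86 in a GDT entry via do_set_thread_area(). */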
7769 #if defined(TARGET_MIPS)
7770 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7771 ret = 0;
7772 break;
7773 #elif defined(TARGET_CRIS)
7774 if (arg1 & 0xff)
7775 ret = -TARGET_EINVAL;
7776 else {
7777 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7778 ret = 0;
7779 }
7780 break;
7781 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7782 ret = do_set_thread_area(cpu_env, arg1);
7783 break;
7784 #else
7785 goto unimplemented_nowarn;
7786 #endif
7787 #endif
7788 #ifdef TARGET_NR_get_thread_area
7789 case TARGET_NR_get_thread_area:
7790 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7791 ret = do_get_thread_area(cpu_env, arg1);
break;
7792 #else
7793 goto unimplemented_nowarn;
7794 #endif
7795 #endif
7796 #ifdef TARGET_NR_getdomainname
7797 case TARGET_NR_getdomainname:
7798 goto unimplemented_nowarn;
7799 #endif
7800
7801 #ifdef TARGET_NR_clock_gettime
7802 case TARGET_NR_clock_gettime:
7803 {
7804 struct timespec ts;
7805 ret = get_errno(clock_gettime(arg1, &ts));
7806 if (!is_error(ret)) {
7807 host_to_target_timespec(arg2, &ts);
7808 }
7809 break;
7810 }
7811 #endif
7812 #ifdef TARGET_NR_clock_getres
7813 case TARGET_NR_clock_getres:
7814 {
7815 struct timespec ts;
7816 ret = get_errno(clock_getres(arg1, &ts));
7817 if (!is_error(ret)) {
7818 host_to_target_timespec(arg2, &ts);
7819 }
7820 break;
7821 }
7822 #endif
7823 #ifdef TARGET_NR_clock_nanosleep
7824 case TARGET_NR_clock_nanosleep:
7825 {
7826 struct timespec ts;
7827 target_to_host_timespec(&ts, arg3);
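/* Reuse ts for the remaining time; it is copied back to the guest below whenever arg4 was supplied. */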
7828 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7829 if (arg4)
7830 host_to_target_timespec(arg4, &ts);
7831 break;
7832 }
7833 #endif
7834
7835 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7836 case TARGET_NR_set_tid_address:
7837 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7838 break;
7839 #endif
7840
7841 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7842 case TARGET_NR_tkill:
7843 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7844 break;
7845 #endif
7846
7847 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7848 case TARGET_NR_tgkill:
7849 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7850 target_to_host_signal(arg3)));
7851 break;
7852 #endif
7853
7854 #ifdef TARGET_NR_set_robust_list
7855 case TARGET_NR_set_robust_list:
7856 goto unimplemented_nowarn;
7857 #endif
7858
7859 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7860 case TARGET_NR_utimensat:
7861 {
7862 struct timespec *tsp, ts[2];
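/* A NULL times pointer means "set both timestamps to the current time". */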
7863 if (!arg3) {
7864 tsp = NULL;
7865 } else {
7866 target_to_host_timespec(ts, arg3);
7867 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
7868 tsp = ts;
7869 }
7870 if (!arg2)
7871 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7872 else {
7873 if (!(p = lock_user_string(arg2))) {
7874 ret = -TARGET_EFAULT;
7875 goto fail;
7876 }
7877 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7878 unlock_user(p, arg2, 0);
7879 }
7880 }
7881 break;
7882 #endif
7883 #if defined(CONFIG_USE_NPTL)
7884 case TARGET_NR_futex:
7885 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7886 break;
7887 #endif
7888 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7889 case TARGET_NR_inotify_init:
7890 ret = get_errno(sys_inotify_init());
7891 break;
7892 #endif
7893 #ifdef CONFIG_INOTIFY1
7894 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7895 case TARGET_NR_inotify_init1:
7896 ret = get_errno(sys_inotify_init1(arg1));
7897 break;
7898 #endif
7899 #endif
7900 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7901 case TARGET_NR_inotify_add_watch:
7902 p = lock_user_string(arg2);
7903 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7904 unlock_user(p, arg2, 0);
7905 break;
7906 #endif
7907 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7908 case TARGET_NR_inotify_rm_watch:
7909 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7910 break;
7911 #endif
7912
7913 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7914 case TARGET_NR_mq_open:
7915 {
7916 struct mq_attr posix_mq_attr;
7917
7918 p = lock_user_string(arg1 - 1);
7919 if (arg4 != 0)
7920 copy_from_user_mq_attr (&posix_mq_attr, arg4);
7921 ret = get_errno(mq_open(p, arg2, arg3, arg4 ? &posix_mq_attr : NULL));
7922 unlock_user (p, arg1, 0);
7923 }
7924 break;
7925
7926 case TARGET_NR_mq_unlink:
7927 p = lock_user_string(arg1 - 1);
7928 ret = get_errno(mq_unlink(p));
7929 unlock_user (p, arg1, 0);
7930 break;
7931
7932 case TARGET_NR_mq_timedsend:
7933 {
7934 struct timespec ts;
7935
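/* mq_timedsend() requires a valid timeout pointer, so fall back to plain mq_send() when the guest passed none. */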
7936 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7937 if (arg5 != 0) {
7938 target_to_host_timespec(&ts, arg5);
7939 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7940 host_to_target_timespec(arg5, &ts);
7941 }
7942 else
7943 ret = get_errno(mq_send(arg1, p, arg3, arg4));
7944 unlock_user (p, arg2, arg3);
7945 }
7946 break;
7947
7948 case TARGET_NR_mq_timedreceive:
7949 {
7950 struct timespec ts;
7951 unsigned int prio;
7952
7953 p = lock_user (VERIFY_WRITE, arg2, arg3, 0);
7954 if (arg5 != 0) {
7955 target_to_host_timespec(&ts, arg5);
7956 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
7957 host_to_target_timespec(arg5, &ts);
7958 }
7959 else
7960 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
7961 unlock_user (p, arg2, arg3);
7962 if (arg4 != 0)
7963 put_user_u32(prio, arg4);
7964 }
7965 break;
7966
7967 /* Not implemented for now... */
7968 /* case TARGET_NR_mq_notify: */
7969 /* break; */
7970
7971 case TARGET_NR_mq_getsetattr:
7972 {
7973 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
7974 ret = 0;
7975 if (arg3 != 0) {
7976 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
7977 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
7978 }
7979 if (arg2 != 0) {
7980 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
7981 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out));
7982 }
7983
7984 }
7985 break;
7986 #endif
7987
7988 #ifdef CONFIG_SPLICE
7989 #ifdef TARGET_NR_tee
7990 case TARGET_NR_tee:
7991 {
7992 ret = get_errno(tee(arg1,arg2,arg3,arg4));
7993 }
7994 break;
7995 #endif
7996 #ifdef TARGET_NR_splice
7997 case TARGET_NR_splice:
7998 {
7999 loff_t loff_in, loff_out;
8000 loff_t *ploff_in = NULL, *ploff_out = NULL;
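/* A NULL offset pointer makes the host use (and advance) the descriptor's own file position. */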
8001 if(arg2) {
8002 get_user_u64(loff_in, arg2);
8003 ploff_in = &loff_in;
8004 }
8005 if(arg4) {
8006 get_user_u64(loff_out, arg4);
8007 ploff_out = &loff_out;
8008 }
8009 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8010 }
8011 break;
8012 #endif
8013 #ifdef TARGET_NR_vmsplice
8014 case TARGET_NR_vmsplice:
8015 {
8016 int count = arg3;
8017 struct iovec *vec;
8018
8019 vec = alloca(count * sizeof(struct iovec));
8020 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
8021 goto efault;
8022 ret = get_errno(vmsplice(arg1, vec, count, arg4));
8023 unlock_iovec(vec, arg2, count, 0);
8024 }
8025 break;
8026 #endif
8027 #endif /* CONFIG_SPLICE */
8028 #ifdef CONFIG_EVENTFD
8029 #if defined(TARGET_NR_eventfd)
8030 case TARGET_NR_eventfd:
8031 ret = get_errno(eventfd(arg1, 0));
8032 break;
8033 #endif
8034 #if defined(TARGET_NR_eventfd2)
8035 case TARGET_NR_eventfd2:
8036 ret = get_errno(eventfd(arg1, arg2));
8037 break;
8038 #endif
8039 #endif /* CONFIG_EVENTFD */
8040 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8041 case TARGET_NR_fallocate:
8042 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8043 break;
8044 #endif
8045 #if defined(CONFIG_SYNC_FILE_RANGE)
8046 #if defined(TARGET_NR_sync_file_range)
8047 case TARGET_NR_sync_file_range:
8048 #if TARGET_ABI_BITS == 32
8049 #if defined(TARGET_MIPS)
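/* MIPS o32 passes a padding argument after the fd so that the 64-bit offset starts in an even register pair; the offset/nbytes words therefore begin at arg3. */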
8050 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8051 target_offset64(arg5, arg6), arg7));
8052 #else
8053 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8054 target_offset64(arg4, arg5), arg6));
8055 #endif /* !TARGET_MIPS */
8056 #else
8057 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8058 #endif
8059 break;
8060 #endif
8061 #if defined(TARGET_NR_sync_file_range2)
8062 case TARGET_NR_sync_file_range2:
8063 /* This is like sync_file_range but the flags argument comes before the 64-bit offsets, so the register pairs stay aligned on 32-bit ABIs */
8064 #if TARGET_ABI_BITS == 32
8065 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8066 target_offset64(arg5, arg6), arg2));
8067 #else
8068 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8069 #endif
8070 break;
8071 #endif
8072 #endif
8073 #if defined(CONFIG_EPOLL)
8074 #if defined(TARGET_NR_epoll_create)
8075 case TARGET_NR_epoll_create:
8076 ret = get_errno(epoll_create(arg1));
8077 break;
8078 #endif
8079 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8080 case TARGET_NR_epoll_create1:
8081 ret = get_errno(epoll_create1(arg1));
8082 break;
8083 #endif
8084 #if defined(TARGET_NR_epoll_ctl)
8085 case TARGET_NR_epoll_ctl:
8086 {
8087 struct epoll_event ep;
8088 struct epoll_event *epp = 0;
8089 if (arg4) {
8090 struct target_epoll_event *target_ep;
8091 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8092 goto efault;
8093 }
8094 ep.events = tswap32(target_ep->events);
8095 /* The epoll_data_t union is just opaque data to the kernel,
8096 * so we transfer all 64 bits across and need not worry what
8097 * actual data type it is.
8098 */
8099 ep.data.u64 = tswap64(target_ep->data.u64);
8100 unlock_user_struct(target_ep, arg4, 0);
8101 epp = &ep;
8102 }
8103 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8104 break;
8105 }
8106 #endif
8107
8108 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8109 #define IMPLEMENT_EPOLL_PWAIT
8110 #endif
8111 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8112 #if defined(TARGET_NR_epoll_wait)
8113 case TARGET_NR_epoll_wait:
8114 #endif
8115 #if defined(IMPLEMENT_EPOLL_PWAIT)
8116 case TARGET_NR_epoll_pwait:
8117 #endif
8118 {
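/* epoll_wait and epoll_pwait share the event-buffer handling; the inner switch on the syscall number only selects which host call to make. */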
8119 struct target_epoll_event *target_ep;
8120 struct epoll_event *ep;
8121 int epfd = arg1;
8122 int maxevents = arg3;
8123 int timeout = arg4;
8124
8125 target_ep = lock_user(VERIFY_WRITE, arg2,
8126 maxevents * sizeof(struct target_epoll_event), 1);
8127 if (!target_ep) {
8128 goto efault;
8129 }
8130
8131 ep = alloca(maxevents * sizeof(struct epoll_event));
8132
8133 switch (num) {
8134 #if defined(IMPLEMENT_EPOLL_PWAIT)
8135 case TARGET_NR_epoll_pwait:
8136 {
8137 target_sigset_t *target_set;
8138 sigset_t _set, *set = &_set;
8139
8140 if (arg5) {
8141 target_set = lock_user(VERIFY_READ, arg5,
8142 sizeof(target_sigset_t), 1);
8143 if (!target_set) {
8144 unlock_user(target_ep, arg2, 0);
8145 goto efault;
8146 }
8147 target_to_host_sigset(set, target_set);
8148 unlock_user(target_set, arg5, 0);
8149 } else {
8150 set = NULL;
8151 }
8152
8153 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8154 break;
8155 }
8156 #endif
8157 #if defined(TARGET_NR_epoll_wait)
8158 case TARGET_NR_epoll_wait:
8159 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8160 break;
8161 #endif
8162 default:
8163 ret = -TARGET_ENOSYS;
8164 }
8165 if (!is_error(ret)) {
8166 int i;
8167 for (i = 0; i < ret; i++) {
8168 target_ep[i].events = tswap32(ep[i].events);
8169 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8170 }
8171 }
8172 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8173 break;
8174 }
8175 #endif
8176 #endif
8177 #ifdef TARGET_NR_prlimit64
8178 case TARGET_NR_prlimit64:
8179 {
8180 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8181 struct target_rlimit64 *target_rnew, *target_rold;
8182 struct host_rlimit64 rnew, rold, *rnewp = 0;
8183 if (arg3) {
8184 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8185 goto efault;
8186 }
8187 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8188 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8189 unlock_user_struct(target_rnew, arg3, 0);
8190 rnewp = &rnew;
8191 }
8192
8193 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8194 if (!is_error(ret) && arg4) {
8195 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8196 goto efault;
8197 }
8198 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8199 target_rold->rlim_max = tswap64(rold.rlim_max);
8200 unlock_user_struct(target_rold, arg4, 1);
8201 }
8202 break;
8203 }
8204 #endif
8205 default:
8206 unimplemented:
8207 gemu_log("qemu: Unsupported syscall: %d\n", num);
8208 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8209 unimplemented_nowarn:
8210 #endif
8211 ret = -TARGET_ENOSYS;
8212 break;
8213 }
8214 fail:
8215 #ifdef DEBUG
8216 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8217 #endif
8218 if(do_strace)
8219 print_syscall_ret(num, ret);
8220 return ret;
8221 efault:
8222 ret = -TARGET_EFAULT;
8223 goto fail;
8224 }