1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include "qemu-common.h"
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
73
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
80
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/utsname.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #include <linux/vt.h>
95 #include "linux_loop.h"
96 #include "cpu-uname.h"
97
98 #include "qemu.h"
99
100 #if defined(CONFIG_USE_NPTL)
101 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
102 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
103 #else
104 /* XXX: Hardcode the above values. */
105 #define CLONE_NPTL_FLAGS2 0
106 #endif
107
108 //#define DEBUG
109
110 //#include <linux/msdos_fs.h>
111 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
112 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
113
114
115 #undef _syscall0
116 #undef _syscall1
117 #undef _syscall2
118 #undef _syscall3
119 #undef _syscall4
120 #undef _syscall5
121 #undef _syscall6
122
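/* The _syscallN() macros below generate small static wrappers that invoke
 * the raw host syscall via syscall(2); the libc versions are #undef'd above
 * so that these local definitions are used instead. */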
123 #define _syscall0(type,name) \
124 static type name (void) \
125 { \
126 return syscall(__NR_##name); \
127 }
128
129 #define _syscall1(type,name,type1,arg1) \
130 static type name (type1 arg1) \
131 { \
132 return syscall(__NR_##name, arg1); \
133 }
134
135 #define _syscall2(type,name,type1,arg1,type2,arg2) \
136 static type name (type1 arg1,type2 arg2) \
137 { \
138 return syscall(__NR_##name, arg1, arg2); \
139 }
140
141 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
142 static type name (type1 arg1,type2 arg2,type3 arg3) \
143 { \
144 return syscall(__NR_##name, arg1, arg2, arg3); \
145 }
146
147 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
148 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
149 { \
150 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
151 }
152
153 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
154 type5,arg5) \
155 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
156 { \
157 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
158 }
159
160
161 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
162 type5,arg5,type6,arg6) \
163 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
164 type6 arg6) \
165 { \
166 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
167 }
168
169
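/* Map the sys_-prefixed wrapper names used in this file onto the real host
 * syscall numbers (the prefix avoids clashes with host libc declarations). */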
170 #define __NR_sys_uname __NR_uname
171 #define __NR_sys_faccessat __NR_faccessat
172 #define __NR_sys_fchmodat __NR_fchmodat
173 #define __NR_sys_fchownat __NR_fchownat
174 #define __NR_sys_fstatat64 __NR_fstatat64
175 #define __NR_sys_futimesat __NR_futimesat
176 #define __NR_sys_getcwd1 __NR_getcwd
177 #define __NR_sys_getdents __NR_getdents
178 #define __NR_sys_getdents64 __NR_getdents64
179 #define __NR_sys_getpriority __NR_getpriority
180 #define __NR_sys_linkat __NR_linkat
181 #define __NR_sys_mkdirat __NR_mkdirat
182 #define __NR_sys_mknodat __NR_mknodat
183 #define __NR_sys_newfstatat __NR_newfstatat
184 #define __NR_sys_openat __NR_openat
185 #define __NR_sys_readlinkat __NR_readlinkat
186 #define __NR_sys_renameat __NR_renameat
187 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
188 #define __NR_sys_symlinkat __NR_symlinkat
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_tgkill __NR_tgkill
191 #define __NR_sys_tkill __NR_tkill
192 #define __NR_sys_unlinkat __NR_unlinkat
193 #define __NR_sys_utimensat __NR_utimensat
194 #define __NR_sys_futex __NR_futex
195 #define __NR_sys_inotify_init __NR_inotify_init
196 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
197 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
198
199 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
200 defined(__s390x__)
201 #define __NR__llseek __NR_lseek
202 #endif
203
204 #ifdef __NR_gettid
205 _syscall0(int, gettid)
206 #else
207 /* This is a replacement for the host gettid() and must return a host
208 errno. */
209 static int gettid(void) {
210 return -ENOSYS;
211 }
212 #endif
213 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
214 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
215 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
216 #endif
217 _syscall2(int, sys_getpriority, int, which, int, who);
218 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
219 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
220 loff_t *, res, uint, wh);
221 #endif
222 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
223 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
224 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
225 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
226 #endif
227 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
228 _syscall2(int,sys_tkill,int,tid,int,sig)
229 #endif
230 #ifdef __NR_exit_group
231 _syscall1(int,exit_group,int,error_code)
232 #endif
233 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
234 _syscall1(int,set_tid_address,int *,tidptr)
235 #endif
236 #if defined(CONFIG_USE_NPTL)
237 #if defined(TARGET_NR_futex) && defined(__NR_futex)
238 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
239 const struct timespec *,timeout,int *,uaddr2,int,val3)
240 #endif
241 #endif
242 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
243 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
244 unsigned long *, user_mask_ptr);
245 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
246 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
247 unsigned long *, user_mask_ptr);
248
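/* Translation table between target and host open(2)/fcntl() flag bits,
 * used with target_to_host_bitmask() when opening files on behalf of the
 * target. */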
249 static bitmask_transtbl fcntl_flags_tbl[] = {
250 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
251 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
252 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
253 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
254 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
255 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
256 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
257 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
258 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
259 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
260 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
261 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
262 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
263 #if defined(O_DIRECT)
264 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
265 #endif
266 { 0, 0, 0, 0 }
267 };
268
269 #define COPY_UTSNAME_FIELD(dest, src) \
270 do { \
271 /* __NEW_UTS_LEN doesn't include terminating null */ \
272 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
273 (dest)[__NEW_UTS_LEN] = '\0'; \
274 } while (0)
275
276 static int sys_uname(struct new_utsname *buf)
277 {
278 struct utsname uts_buf;
279
280 if (uname(&uts_buf) < 0)
281 return (-1);
282
283 /*
284 * Just in case these have some differences, we
285 * translate utsname to new_utsname (which is the
286  * struct the Linux kernel uses).
287 */
288
289 memset(buf, 0, sizeof(*buf));
290 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
291 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
292 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
293 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
294 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
295 #ifdef _GNU_SOURCE
296 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
297 #endif
298 return (0);
299
300 #undef COPY_UTSNAME_FIELD
301 }
302
303 static int sys_getcwd1(char *buf, size_t size)
304 {
305 if (getcwd(buf, size) == NULL) {
306 /* getcwd() sets errno */
307 return (-1);
308 }
309 return strlen(buf)+1;
310 }
311
312 #ifdef CONFIG_ATFILE
313 /*
314  * The host system appears to provide the *at() syscall stubs, so we
315  * enable them one by one as required by the target's syscall_nr.h.
316 */
317
318 #ifdef TARGET_NR_faccessat
319 static int sys_faccessat(int dirfd, const char *pathname, int mode)
320 {
321 return (faccessat(dirfd, pathname, mode, 0));
322 }
323 #endif
324 #ifdef TARGET_NR_fchmodat
325 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
326 {
327 return (fchmodat(dirfd, pathname, mode, 0));
328 }
329 #endif
330 #if defined(TARGET_NR_fchownat)
331 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
332 gid_t group, int flags)
333 {
334 return (fchownat(dirfd, pathname, owner, group, flags));
335 }
336 #endif
337 #ifdef __NR_fstatat64
338 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
339 int flags)
340 {
341 return (fstatat(dirfd, pathname, buf, flags));
342 }
343 #endif
344 #ifdef __NR_newfstatat
345 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
346 int flags)
347 {
348 return (fstatat(dirfd, pathname, buf, flags));
349 }
350 #endif
351 #ifdef TARGET_NR_futimesat
352 static int sys_futimesat(int dirfd, const char *pathname,
353 const struct timeval times[2])
354 {
355 return (futimesat(dirfd, pathname, times));
356 }
357 #endif
358 #ifdef TARGET_NR_linkat
359 static int sys_linkat(int olddirfd, const char *oldpath,
360 int newdirfd, const char *newpath, int flags)
361 {
362 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
363 }
364 #endif
365 #ifdef TARGET_NR_mkdirat
366 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
367 {
368 return (mkdirat(dirfd, pathname, mode));
369 }
370 #endif
371 #ifdef TARGET_NR_mknodat
372 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
373 dev_t dev)
374 {
375 return (mknodat(dirfd, pathname, mode, dev));
376 }
377 #endif
378 #ifdef TARGET_NR_openat
379 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
380 {
381 /*
382  * open(2) takes an extra 'mode' parameter when called
383  * with the O_CREAT flag.
384 */
385 if ((flags & O_CREAT) != 0) {
386 va_list ap;
387 mode_t mode;
388
389 /*
390 * Get the 'mode' parameter and translate it to
391 * host bits.
392 */
393 va_start(ap, flags);
394 mode = va_arg(ap, mode_t);
395 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
396 va_end(ap);
397
398 return (openat(dirfd, pathname, flags, mode));
399 }
400 return (openat(dirfd, pathname, flags));
401 }
402 #endif
403 #ifdef TARGET_NR_readlinkat
404 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
405 {
406 return (readlinkat(dirfd, pathname, buf, bufsiz));
407 }
408 #endif
409 #ifdef TARGET_NR_renameat
410 static int sys_renameat(int olddirfd, const char *oldpath,
411 int newdirfd, const char *newpath)
412 {
413 return (renameat(olddirfd, oldpath, newdirfd, newpath));
414 }
415 #endif
416 #ifdef TARGET_NR_symlinkat
417 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
418 {
419 return (symlinkat(oldpath, newdirfd, newpath));
420 }
421 #endif
422 #ifdef TARGET_NR_unlinkat
423 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
424 {
425 return (unlinkat(dirfd, pathname, flags));
426 }
427 #endif
428 #else /* !CONFIG_ATFILE */
429
430 /*
431 * Try direct syscalls instead
432 */
433 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
434 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
435 #endif
436 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
437 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
438 #endif
439 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
440 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
441 uid_t,owner,gid_t,group,int,flags)
442 #endif
443 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
444 defined(__NR_fstatat64)
445 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
446 struct stat *,buf,int,flags)
447 #endif
448 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
449 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
450 const struct timeval *,times)
451 #endif
452 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
453 defined(__NR_newfstatat)
454 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
455 struct stat *,buf,int,flags)
456 #endif
457 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
458 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
459 int,newdirfd,const char *,newpath,int,flags)
460 #endif
461 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
462 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
463 #endif
464 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
465 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
466 mode_t,mode,dev_t,dev)
467 #endif
468 #if defined(TARGET_NR_openat) && defined(__NR_openat)
469 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
470 #endif
471 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
472 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
473 char *,buf,size_t,bufsize)
474 #endif
475 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
476 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
477 int,newdirfd,const char *,newpath)
478 #endif
479 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
480 _syscall3(int,sys_symlinkat,const char *,oldpath,
481 int,newdirfd,const char *,newpath)
482 #endif
483 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
484 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
485 #endif
486
487 #endif /* CONFIG_ATFILE */
488
489 #ifdef CONFIG_UTIMENSAT
490 static int sys_utimensat(int dirfd, const char *pathname,
491 const struct timespec times[2], int flags)
492 {
493 if (pathname == NULL)
494 return futimens(dirfd, times);
495 else
496 return utimensat(dirfd, pathname, times, flags);
497 }
498 #else
499 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
500 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
501 const struct timespec *,tsp,int,flags)
502 #endif
503 #endif /* CONFIG_UTIMENSAT */
504
505 #ifdef CONFIG_INOTIFY
506 #include <sys/inotify.h>
507
508 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
509 static int sys_inotify_init(void)
510 {
511 return (inotify_init());
512 }
513 #endif
514 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
515 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
516 {
517 return (inotify_add_watch(fd, pathname, mask));
518 }
519 #endif
520 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
521 static int sys_inotify_rm_watch(int fd, int32_t wd)
522 {
523 return (inotify_rm_watch(fd, wd));
524 }
525 #endif
526 #ifdef CONFIG_INOTIFY1
527 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
528 static int sys_inotify_init1(int flags)
529 {
530 return (inotify_init1(flags));
531 }
532 #endif
533 #endif
534 #else
535 /* Userspace can usually survive runtime without inotify */
536 #undef TARGET_NR_inotify_init
537 #undef TARGET_NR_inotify_init1
538 #undef TARGET_NR_inotify_add_watch
539 #undef TARGET_NR_inotify_rm_watch
540 #endif /* CONFIG_INOTIFY */
541
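/* For the wrappers below, if the host headers do not define the syscall
 * number, define it as -1 so the code still compiles; the host kernel then
 * rejects the unknown syscall number at runtime (typically with ENOSYS). */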
542 #if defined(TARGET_NR_ppoll)
543 #ifndef __NR_ppoll
544 # define __NR_ppoll -1
545 #endif
546 #define __NR_sys_ppoll __NR_ppoll
547 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
548 struct timespec *, timeout, const __sigset_t *, sigmask,
549 size_t, sigsetsize)
550 #endif
551
552 #if defined(TARGET_NR_pselect6)
553 #ifndef __NR_pselect6
554 # define __NR_pselect6 -1
555 #endif
556 #define __NR_sys_pselect6 __NR_pselect6
557 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
558 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
559 #endif
560
561 #if defined(TARGET_NR_prlimit64)
562 #ifndef __NR_prlimit64
563 # define __NR_prlimit64 -1
564 #endif
565 #define __NR_sys_prlimit64 __NR_prlimit64
566 /* The glibc rlimit structure may not be the one used by the underlying syscall */
567 struct host_rlimit64 {
568 uint64_t rlim_cur;
569 uint64_t rlim_max;
570 };
571 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
572 const struct host_rlimit64 *, new_limit,
573 struct host_rlimit64 *, old_limit)
574 #endif
575
576 extern int personality(int);
577 extern int flock(int, int);
578 extern int setfsuid(int);
579 extern int setfsgid(int);
580 extern int setgroups(int, gid_t *);
581
582 /* ARM EABI and MIPS expect 64-bit types to be aligned on even register pairs */
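/* (For example, on ARM EABI a 64-bit syscall argument must start in an
 * even-numbered register, so the syscall handlers later in this file use
 * this to skip the padding word inserted before such arguments.) */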
583 #ifdef TARGET_ARM
584 static inline int regpairs_aligned(void *cpu_env) {
585 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
586 }
587 #elif defined(TARGET_MIPS)
588 static inline int regpairs_aligned(void *cpu_env) { return 1; }
589 #else
590 static inline int regpairs_aligned(void *cpu_env) { return 0; }
591 #endif
592
593 #define ERRNO_TABLE_SIZE 1200
594
595 /* target_to_host_errno_table[] is initialized from
596 * host_to_target_errno_table[] in syscall_init(). */
597 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
598 };
599
600 /*
601 * This list is the union of errno values overridden in asm-<arch>/errno.h
602 * minus the errnos that are not actually generic to all archs.
603 */
604 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
605 [EIDRM] = TARGET_EIDRM,
606 [ECHRNG] = TARGET_ECHRNG,
607 [EL2NSYNC] = TARGET_EL2NSYNC,
608 [EL3HLT] = TARGET_EL3HLT,
609 [EL3RST] = TARGET_EL3RST,
610 [ELNRNG] = TARGET_ELNRNG,
611 [EUNATCH] = TARGET_EUNATCH,
612 [ENOCSI] = TARGET_ENOCSI,
613 [EL2HLT] = TARGET_EL2HLT,
614 [EDEADLK] = TARGET_EDEADLK,
615 [ENOLCK] = TARGET_ENOLCK,
616 [EBADE] = TARGET_EBADE,
617 [EBADR] = TARGET_EBADR,
618 [EXFULL] = TARGET_EXFULL,
619 [ENOANO] = TARGET_ENOANO,
620 [EBADRQC] = TARGET_EBADRQC,
621 [EBADSLT] = TARGET_EBADSLT,
622 [EBFONT] = TARGET_EBFONT,
623 [ENOSTR] = TARGET_ENOSTR,
624 [ENODATA] = TARGET_ENODATA,
625 [ETIME] = TARGET_ETIME,
626 [ENOSR] = TARGET_ENOSR,
627 [ENONET] = TARGET_ENONET,
628 [ENOPKG] = TARGET_ENOPKG,
629 [EREMOTE] = TARGET_EREMOTE,
630 [ENOLINK] = TARGET_ENOLINK,
631 [EADV] = TARGET_EADV,
632 [ESRMNT] = TARGET_ESRMNT,
633 [ECOMM] = TARGET_ECOMM,
634 [EPROTO] = TARGET_EPROTO,
635 [EDOTDOT] = TARGET_EDOTDOT,
636 [EMULTIHOP] = TARGET_EMULTIHOP,
637 [EBADMSG] = TARGET_EBADMSG,
638 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
639 [EOVERFLOW] = TARGET_EOVERFLOW,
640 [ENOTUNIQ] = TARGET_ENOTUNIQ,
641 [EBADFD] = TARGET_EBADFD,
642 [EREMCHG] = TARGET_EREMCHG,
643 [ELIBACC] = TARGET_ELIBACC,
644 [ELIBBAD] = TARGET_ELIBBAD,
645 [ELIBSCN] = TARGET_ELIBSCN,
646 [ELIBMAX] = TARGET_ELIBMAX,
647 [ELIBEXEC] = TARGET_ELIBEXEC,
648 [EILSEQ] = TARGET_EILSEQ,
649 [ENOSYS] = TARGET_ENOSYS,
650 [ELOOP] = TARGET_ELOOP,
651 [ERESTART] = TARGET_ERESTART,
652 [ESTRPIPE] = TARGET_ESTRPIPE,
653 [ENOTEMPTY] = TARGET_ENOTEMPTY,
654 [EUSERS] = TARGET_EUSERS,
655 [ENOTSOCK] = TARGET_ENOTSOCK,
656 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
657 [EMSGSIZE] = TARGET_EMSGSIZE,
658 [EPROTOTYPE] = TARGET_EPROTOTYPE,
659 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
660 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
661 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
662 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
663 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
664 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
665 [EADDRINUSE] = TARGET_EADDRINUSE,
666 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
667 [ENETDOWN] = TARGET_ENETDOWN,
668 [ENETUNREACH] = TARGET_ENETUNREACH,
669 [ENETRESET] = TARGET_ENETRESET,
670 [ECONNABORTED] = TARGET_ECONNABORTED,
671 [ECONNRESET] = TARGET_ECONNRESET,
672 [ENOBUFS] = TARGET_ENOBUFS,
673 [EISCONN] = TARGET_EISCONN,
674 [ENOTCONN] = TARGET_ENOTCONN,
675 [EUCLEAN] = TARGET_EUCLEAN,
676 [ENOTNAM] = TARGET_ENOTNAM,
677 [ENAVAIL] = TARGET_ENAVAIL,
678 [EISNAM] = TARGET_EISNAM,
679 [EREMOTEIO] = TARGET_EREMOTEIO,
680 [ESHUTDOWN] = TARGET_ESHUTDOWN,
681 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
682 [ETIMEDOUT] = TARGET_ETIMEDOUT,
683 [ECONNREFUSED] = TARGET_ECONNREFUSED,
684 [EHOSTDOWN] = TARGET_EHOSTDOWN,
685 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
686 [EALREADY] = TARGET_EALREADY,
687 [EINPROGRESS] = TARGET_EINPROGRESS,
688 [ESTALE] = TARGET_ESTALE,
689 [ECANCELED] = TARGET_ECANCELED,
690 [ENOMEDIUM] = TARGET_ENOMEDIUM,
691 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
692 #ifdef ENOKEY
693 [ENOKEY] = TARGET_ENOKEY,
694 #endif
695 #ifdef EKEYEXPIRED
696 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
697 #endif
698 #ifdef EKEYREVOKED
699 [EKEYREVOKED] = TARGET_EKEYREVOKED,
700 #endif
701 #ifdef EKEYREJECTED
702 [EKEYREJECTED] = TARGET_EKEYREJECTED,
703 #endif
704 #ifdef EOWNERDEAD
705 [EOWNERDEAD] = TARGET_EOWNERDEAD,
706 #endif
707 #ifdef ENOTRECOVERABLE
708 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
709 #endif
710 };
711
712 static inline int host_to_target_errno(int err)
713 {
714 if(host_to_target_errno_table[err])
715 return host_to_target_errno_table[err];
716 return err;
717 }
718
719 static inline int target_to_host_errno(int err)
720 {
721 if (target_to_host_errno_table[err])
722 return target_to_host_errno_table[err];
723 return err;
724 }
725
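/* Convert a raw host syscall result into the convention used throughout
 * this file: on failure (-1 with errno set) return the negated target
 * errno, otherwise pass the value through unchanged. */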
726 static inline abi_long get_errno(abi_long ret)
727 {
728 if (ret == -1)
729 return -host_to_target_errno(errno);
730 else
731 return ret;
732 }
733
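/* Return values in the last page of the address space (i.e. >= -4096 when
 * viewed as abi_ulong) encode errnos rather than valid results. */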
734 static inline int is_error(abi_long ret)
735 {
736 return (abi_ulong)ret >= (abi_ulong)(-4096);
737 }
738
739 char *target_strerror(int err)
740 {
741 return strerror(target_to_host_errno(err));
742 }
743
744 static abi_ulong target_brk;
745 static abi_ulong target_original_brk;
746 static abi_ulong brk_page;
747
748 void target_set_brk(abi_ulong new_brk)
749 {
750 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
751 brk_page = HOST_PAGE_ALIGN(target_brk);
752 }
753
754 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
755 #define DEBUGF_BRK(message, args...)
756
757 /* do_brk() must return target values and target errnos. */
758 abi_long do_brk(abi_ulong new_brk)
759 {
760 abi_long mapped_addr;
761 int new_alloc_size;
762
763 DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);
764
765 if (!new_brk) {
766 DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
767 return target_brk;
768 }
769 if (new_brk < target_original_brk) {
770 DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
771 return target_brk;
772 }
773
774 /* If the new brk is less than the highest page reserved to the
775 * target heap allocation, set it and we're almost done... */
776 if (new_brk <= brk_page) {
777 /* Heap contents are initialized to zero, as for anonymous
778 * mapped pages. */
779 if (new_brk > target_brk) {
780 memset(g2h(target_brk), 0, new_brk - target_brk);
781 }
782 target_brk = new_brk;
783 DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
784 return target_brk;
785 }
786
787 /* We need to allocate more memory after the brk... Note that
788 * we don't use MAP_FIXED because that will map over the top of
789 * any existing mapping (like the one with the host libc or qemu
790 * itself); instead we treat "mapped but at wrong address" as
791 * a failure and unmap again.
792 */
793 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
794 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
795 PROT_READ|PROT_WRITE,
796 MAP_ANON|MAP_PRIVATE, 0, 0));
797
798 if (mapped_addr == brk_page) {
799 target_brk = new_brk;
800 brk_page = HOST_PAGE_ALIGN(target_brk);
801 DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
802 return target_brk;
803 } else if (mapped_addr != -1) {
804 /* Mapped but at wrong address, meaning there wasn't actually
805 * enough space for this brk.
806 */
807 target_munmap(mapped_addr, new_alloc_size);
808 mapped_addr = -1;
809 DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
810 }
811 else {
812 DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
813 }
814
815 #if defined(TARGET_ALPHA)
816 /* We (partially) emulate OSF/1 on Alpha, which requires we
817 return a proper errno, not an unchanged brk value. */
818 return -TARGET_ENOMEM;
819 #endif
820 /* For everything else, return the previous break. */
821 return target_brk;
822 }
823
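/* Unpack a target fd_set, stored as an array of abi_ulong words in target
 * byte order, bit by bit into a host fd_set; copy_to_user_fdset() below
 * performs the reverse conversion. */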
824 static inline abi_long copy_from_user_fdset(fd_set *fds,
825 abi_ulong target_fds_addr,
826 int n)
827 {
828 int i, nw, j, k;
829 abi_ulong b, *target_fds;
830
831 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
832 if (!(target_fds = lock_user(VERIFY_READ,
833 target_fds_addr,
834 sizeof(abi_ulong) * nw,
835 1)))
836 return -TARGET_EFAULT;
837
838 FD_ZERO(fds);
839 k = 0;
840 for (i = 0; i < nw; i++) {
841 /* grab the abi_ulong */
842 __get_user(b, &target_fds[i]);
843 for (j = 0; j < TARGET_ABI_BITS; j++) {
844 /* check the bit inside the abi_ulong */
845 if ((b >> j) & 1)
846 FD_SET(k, fds);
847 k++;
848 }
849 }
850
851 unlock_user(target_fds, target_fds_addr, 0);
852
853 return 0;
854 }
855
856 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
857 abi_ulong target_fds_addr,
858 int n)
859 {
860 if (target_fds_addr) {
861 if (copy_from_user_fdset(fds, target_fds_addr, n))
862 return -TARGET_EFAULT;
863 *fds_ptr = fds;
864 } else {
865 *fds_ptr = NULL;
866 }
867 return 0;
868 }
869
870 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
871 const fd_set *fds,
872 int n)
873 {
874 int i, nw, j, k;
875 abi_long v;
876 abi_ulong *target_fds;
877
878 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
879 if (!(target_fds = lock_user(VERIFY_WRITE,
880 target_fds_addr,
881 sizeof(abi_ulong) * nw,
882 0)))
883 return -TARGET_EFAULT;
884
885 k = 0;
886 for (i = 0; i < nw; i++) {
887 v = 0;
888 for (j = 0; j < TARGET_ABI_BITS; j++) {
889 v |= ((FD_ISSET(k, fds) != 0) << j);
890 k++;
891 }
892 __put_user(v, &target_fds[i]);
893 }
894
895 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
896
897 return 0;
898 }
899
900 #if defined(__alpha__)
901 #define HOST_HZ 1024
902 #else
903 #define HOST_HZ 100
904 #endif
905
906 static inline abi_long host_to_target_clock_t(long ticks)
907 {
908 #if HOST_HZ == TARGET_HZ
909 return ticks;
910 #else
911 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
912 #endif
913 }
914
915 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
916 const struct rusage *rusage)
917 {
918 struct target_rusage *target_rusage;
919
920 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
921 return -TARGET_EFAULT;
922 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
923 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
924 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
925 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
926 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
927 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
928 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
929 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
930 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
931 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
932 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
933 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
934 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
935 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
936 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
937 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
938 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
939 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
940 unlock_user_struct(target_rusage, target_addr, 1);
941
942 return 0;
943 }
944
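/* Convert rlimit values between target and host representations: values
 * are byte-swapped, and anything equal to the infinity constant or too
 * large for the destination type maps to the corresponding infinity. */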
945 static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
946 {
947 target_ulong target_rlim_swap;
948 rlim_t result;
949
950 target_rlim_swap = tswapl(target_rlim);
951 if (target_rlim_swap == TARGET_RLIM_INFINITY || target_rlim_swap != (rlim_t)target_rlim_swap)
952 result = RLIM_INFINITY;
953 else
954 result = target_rlim_swap;
955
956 return result;
957 }
958
959 static inline target_ulong host_to_target_rlim(rlim_t rlim)
960 {
961 target_ulong target_rlim_swap;
962 target_ulong result;
963
964 if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
965 target_rlim_swap = TARGET_RLIM_INFINITY;
966 else
967 target_rlim_swap = rlim;
968 result = tswapl(target_rlim_swap);
969
970 return result;
971 }
972
973 static inline int target_to_host_resource(int code)
974 {
975 switch (code) {
976 case TARGET_RLIMIT_AS:
977 return RLIMIT_AS;
978 case TARGET_RLIMIT_CORE:
979 return RLIMIT_CORE;
980 case TARGET_RLIMIT_CPU:
981 return RLIMIT_CPU;
982 case TARGET_RLIMIT_DATA:
983 return RLIMIT_DATA;
984 case TARGET_RLIMIT_FSIZE:
985 return RLIMIT_FSIZE;
986 case TARGET_RLIMIT_LOCKS:
987 return RLIMIT_LOCKS;
988 case TARGET_RLIMIT_MEMLOCK:
989 return RLIMIT_MEMLOCK;
990 case TARGET_RLIMIT_MSGQUEUE:
991 return RLIMIT_MSGQUEUE;
992 case TARGET_RLIMIT_NICE:
993 return RLIMIT_NICE;
994 case TARGET_RLIMIT_NOFILE:
995 return RLIMIT_NOFILE;
996 case TARGET_RLIMIT_NPROC:
997 return RLIMIT_NPROC;
998 case TARGET_RLIMIT_RSS:
999 return RLIMIT_RSS;
1000 case TARGET_RLIMIT_RTPRIO:
1001 return RLIMIT_RTPRIO;
1002 case TARGET_RLIMIT_SIGPENDING:
1003 return RLIMIT_SIGPENDING;
1004 case TARGET_RLIMIT_STACK:
1005 return RLIMIT_STACK;
1006 default:
1007 return code;
1008 }
1009 }
1010
1011 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1012 abi_ulong target_tv_addr)
1013 {
1014 struct target_timeval *target_tv;
1015
1016 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1017 return -TARGET_EFAULT;
1018
1019 __get_user(tv->tv_sec, &target_tv->tv_sec);
1020 __get_user(tv->tv_usec, &target_tv->tv_usec);
1021
1022 unlock_user_struct(target_tv, target_tv_addr, 0);
1023
1024 return 0;
1025 }
1026
1027 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1028 const struct timeval *tv)
1029 {
1030 struct target_timeval *target_tv;
1031
1032 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1033 return -TARGET_EFAULT;
1034
1035 __put_user(tv->tv_sec, &target_tv->tv_sec);
1036 __put_user(tv->tv_usec, &target_tv->tv_usec);
1037
1038 unlock_user_struct(target_tv, target_tv_addr, 1);
1039
1040 return 0;
1041 }
1042
1043 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1044 #include <mqueue.h>
1045
1046 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1047 abi_ulong target_mq_attr_addr)
1048 {
1049 struct target_mq_attr *target_mq_attr;
1050
1051 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1052 target_mq_attr_addr, 1))
1053 return -TARGET_EFAULT;
1054
1055 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1056 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1057 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1058 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1059
1060 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1061
1062 return 0;
1063 }
1064
1065 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1066 const struct mq_attr *attr)
1067 {
1068 struct target_mq_attr *target_mq_attr;
1069
1070 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1071 target_mq_attr_addr, 0))
1072 return -TARGET_EFAULT;
1073
1074 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1075 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1076 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1077 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1078
1079 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1080
1081 return 0;
1082 }
1083 #endif
1084
1085 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1086 /* do_select() must return target values and target errnos. */
1087 static abi_long do_select(int n,
1088 abi_ulong rfd_addr, abi_ulong wfd_addr,
1089 abi_ulong efd_addr, abi_ulong target_tv_addr)
1090 {
1091 fd_set rfds, wfds, efds;
1092 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1093 struct timeval tv, *tv_ptr;
1094 abi_long ret;
1095
1096 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1097 if (ret) {
1098 return ret;
1099 }
1100 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1101 if (ret) {
1102 return ret;
1103 }
1104 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1105 if (ret) {
1106 return ret;
1107 }
1108
1109 if (target_tv_addr) {
1110 if (copy_from_user_timeval(&tv, target_tv_addr))
1111 return -TARGET_EFAULT;
1112 tv_ptr = &tv;
1113 } else {
1114 tv_ptr = NULL;
1115 }
1116
1117 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1118
1119 if (!is_error(ret)) {
1120 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1121 return -TARGET_EFAULT;
1122 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1123 return -TARGET_EFAULT;
1124 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1125 return -TARGET_EFAULT;
1126
1127 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1128 return -TARGET_EFAULT;
1129 }
1130
1131 return ret;
1132 }
1133 #endif
1134
1135 static abi_long do_pipe2(int host_pipe[], int flags)
1136 {
1137 #ifdef CONFIG_PIPE2
1138 return pipe2(host_pipe, flags);
1139 #else
1140 return -ENOSYS;
1141 #endif
1142 }
1143
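/* Common helper for pipe() and pipe2(). On Alpha, MIPS and SH4 the
 * original pipe syscall returns the second descriptor in a CPU register
 * instead of storing both through the user pointer, which is why cpu_env
 * is needed here. */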
1144 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1145 int flags, int is_pipe2)
1146 {
1147 int host_pipe[2];
1148 abi_long ret;
1149 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1150
1151 if (is_error(ret))
1152 return get_errno(ret);
1153
1154 /* Several targets have special calling conventions for the original
1155 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1156 if (!is_pipe2) {
1157 #if defined(TARGET_ALPHA)
1158 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1159 return host_pipe[0];
1160 #elif defined(TARGET_MIPS)
1161 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1162 return host_pipe[0];
1163 #elif defined(TARGET_SH4)
1164 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1165 return host_pipe[0];
1166 #endif
1167 }
1168
1169 if (put_user_s32(host_pipe[0], pipedes)
1170 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1171 return -TARGET_EFAULT;
1172 return get_errno(ret);
1173 }
1174
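/* Copy a target ip_mreq/ip_mreqn multicast request into the host
 * structure; the interface index is only present (and byte-swapped) when
 * the caller passed the full ip_mreqn layout. */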
1175 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1176 abi_ulong target_addr,
1177 socklen_t len)
1178 {
1179 struct target_ip_mreqn *target_smreqn;
1180
1181 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1182 if (!target_smreqn)
1183 return -TARGET_EFAULT;
1184 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1185 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1186 if (len == sizeof(struct target_ip_mreqn))
1187 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
1188 unlock_user(target_smreqn, target_addr, 0);
1189
1190 return 0;
1191 }
1192
1193 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1194 abi_ulong target_addr,
1195 socklen_t len)
1196 {
1197 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1198 sa_family_t sa_family;
1199 struct target_sockaddr *target_saddr;
1200
1201 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1202 if (!target_saddr)
1203 return -TARGET_EFAULT;
1204
1205 sa_family = tswap16(target_saddr->sa_family);
1206
1207     /* Oops. The caller might send an incomplete sun_path; sun_path
1208      * must be terminated by \0 (see the manual page), but
1209      * unfortunately it is quite common to specify sockaddr_un
1210      * length as "strlen(x->sun_path)" when it should be
1211      * "strlen(...) + 1". We'll fix that here if needed.
1212      * The Linux kernel has a similar workaround.
1213 */
1214
1215 if (sa_family == AF_UNIX) {
1216 if (len < unix_maxlen && len > 0) {
1217 char *cp = (char*)target_saddr;
1218
1219 if ( cp[len-1] && !cp[len] )
1220 len++;
1221 }
1222 if (len > unix_maxlen)
1223 len = unix_maxlen;
1224 }
1225
1226 memcpy(addr, target_saddr, len);
1227 addr->sa_family = sa_family;
1228 unlock_user(target_saddr, target_addr, 0);
1229
1230 return 0;
1231 }
1232
1233 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1234 struct sockaddr *addr,
1235 socklen_t len)
1236 {
1237 struct target_sockaddr *target_saddr;
1238
1239 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1240 if (!target_saddr)
1241 return -TARGET_EFAULT;
1242 memcpy(target_saddr, addr, len);
1243 target_saddr->sa_family = tswap16(addr->sa_family);
1244 unlock_user(target_saddr, target_addr, len);
1245
1246 return 0;
1247 }
1248
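/* The next two helpers convert ancillary (control) data between target and
 * host msghdr layouts: the cmsg chains are walked in parallel, SCM_RIGHTS
 * file descriptor arrays are byte-swapped element by element, and any
 * other cmsg type is copied verbatim with a warning. */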
1249 /* ??? Should this also swap msgh->name? */
1250 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1251 struct target_msghdr *target_msgh)
1252 {
1253 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1254 abi_long msg_controllen;
1255 abi_ulong target_cmsg_addr;
1256 struct target_cmsghdr *target_cmsg;
1257 socklen_t space = 0;
1258
1259 msg_controllen = tswapl(target_msgh->msg_controllen);
1260 if (msg_controllen < sizeof (struct target_cmsghdr))
1261 goto the_end;
1262 target_cmsg_addr = tswapl(target_msgh->msg_control);
1263 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1264 if (!target_cmsg)
1265 return -TARGET_EFAULT;
1266
1267 while (cmsg && target_cmsg) {
1268 void *data = CMSG_DATA(cmsg);
1269 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1270
1271 int len = tswapl(target_cmsg->cmsg_len)
1272 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1273
1274 space += CMSG_SPACE(len);
1275 if (space > msgh->msg_controllen) {
1276 space -= CMSG_SPACE(len);
1277 gemu_log("Host cmsg overflow\n");
1278 break;
1279 }
1280
1281 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1282 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1283 cmsg->cmsg_len = CMSG_LEN(len);
1284
1285 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1286 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1287 memcpy(data, target_data, len);
1288 } else {
1289 int *fd = (int *)data;
1290 int *target_fd = (int *)target_data;
1291 int i, numfds = len / sizeof(int);
1292
1293 for (i = 0; i < numfds; i++)
1294 fd[i] = tswap32(target_fd[i]);
1295 }
1296
1297 cmsg = CMSG_NXTHDR(msgh, cmsg);
1298 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1299 }
1300 unlock_user(target_cmsg, target_cmsg_addr, 0);
1301 the_end:
1302 msgh->msg_controllen = space;
1303 return 0;
1304 }
1305
1306 /* ??? Should this also swap msgh->name? */
1307 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1308 struct msghdr *msgh)
1309 {
1310 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1311 abi_long msg_controllen;
1312 abi_ulong target_cmsg_addr;
1313 struct target_cmsghdr *target_cmsg;
1314 socklen_t space = 0;
1315
1316 msg_controllen = tswapl(target_msgh->msg_controllen);
1317 if (msg_controllen < sizeof (struct target_cmsghdr))
1318 goto the_end;
1319 target_cmsg_addr = tswapl(target_msgh->msg_control);
1320 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1321 if (!target_cmsg)
1322 return -TARGET_EFAULT;
1323
1324 while (cmsg && target_cmsg) {
1325 void *data = CMSG_DATA(cmsg);
1326 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1327
1328 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1329
1330 space += TARGET_CMSG_SPACE(len);
1331 if (space > msg_controllen) {
1332 space -= TARGET_CMSG_SPACE(len);
1333 gemu_log("Target cmsg overflow\n");
1334 break;
1335 }
1336
1337 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1338 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1339 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1340
1341 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1342 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1343 memcpy(target_data, data, len);
1344 } else {
1345 int *fd = (int *)data;
1346 int *target_fd = (int *)target_data;
1347 int i, numfds = len / sizeof(int);
1348
1349 for (i = 0; i < numfds; i++)
1350 target_fd[i] = tswap32(fd[i]);
1351 }
1352
1353 cmsg = CMSG_NXTHDR(msgh, cmsg);
1354 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1355 }
1356 unlock_user(target_cmsg, target_cmsg_addr, space);
1357 the_end:
1358 target_msgh->msg_controllen = tswapl(space);
1359 return 0;
1360 }
1361
1362 /* do_setsockopt() Must return target values and target errnos. */
1363 static abi_long do_setsockopt(int sockfd, int level, int optname,
1364 abi_ulong optval_addr, socklen_t optlen)
1365 {
1366 abi_long ret;
1367 int val;
1368 struct ip_mreqn *ip_mreq;
1369 struct ip_mreq_source *ip_mreq_source;
1370
1371 switch(level) {
1372 case SOL_TCP:
1373 /* TCP options all take an 'int' value. */
1374 if (optlen < sizeof(uint32_t))
1375 return -TARGET_EINVAL;
1376
1377 if (get_user_u32(val, optval_addr))
1378 return -TARGET_EFAULT;
1379 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1380 break;
1381 case SOL_IP:
1382 switch(optname) {
1383 case IP_TOS:
1384 case IP_TTL:
1385 case IP_HDRINCL:
1386 case IP_ROUTER_ALERT:
1387 case IP_RECVOPTS:
1388 case IP_RETOPTS:
1389 case IP_PKTINFO:
1390 case IP_MTU_DISCOVER:
1391 case IP_RECVERR:
1392 case IP_RECVTOS:
1393 #ifdef IP_FREEBIND
1394 case IP_FREEBIND:
1395 #endif
1396 case IP_MULTICAST_TTL:
1397 case IP_MULTICAST_LOOP:
1398 val = 0;
1399 if (optlen >= sizeof(uint32_t)) {
1400 if (get_user_u32(val, optval_addr))
1401 return -TARGET_EFAULT;
1402 } else if (optlen >= 1) {
1403 if (get_user_u8(val, optval_addr))
1404 return -TARGET_EFAULT;
1405 }
1406 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1407 break;
1408 case IP_ADD_MEMBERSHIP:
1409 case IP_DROP_MEMBERSHIP:
1410 if (optlen < sizeof (struct target_ip_mreq) ||
1411 optlen > sizeof (struct target_ip_mreqn))
1412 return -TARGET_EINVAL;
1413
1414 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1415 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1416 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1417 break;
1418
1419 case IP_BLOCK_SOURCE:
1420 case IP_UNBLOCK_SOURCE:
1421 case IP_ADD_SOURCE_MEMBERSHIP:
1422 case IP_DROP_SOURCE_MEMBERSHIP:
1423 if (optlen != sizeof (struct target_ip_mreq_source))
1424 return -TARGET_EINVAL;
1425
1426 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1427 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1428 unlock_user (ip_mreq_source, optval_addr, 0);
1429 break;
1430
1431 default:
1432 goto unimplemented;
1433 }
1434 break;
1435 case TARGET_SOL_SOCKET:
1436 switch (optname) {
1437 /* Options with 'int' argument. */
1438 case TARGET_SO_DEBUG:
1439 optname = SO_DEBUG;
1440 break;
1441 case TARGET_SO_REUSEADDR:
1442 optname = SO_REUSEADDR;
1443 break;
1444 case TARGET_SO_TYPE:
1445 optname = SO_TYPE;
1446 break;
1447 case TARGET_SO_ERROR:
1448 optname = SO_ERROR;
1449 break;
1450 case TARGET_SO_DONTROUTE:
1451 optname = SO_DONTROUTE;
1452 break;
1453 case TARGET_SO_BROADCAST:
1454 optname = SO_BROADCAST;
1455 break;
1456 case TARGET_SO_SNDBUF:
1457 optname = SO_SNDBUF;
1458 break;
1459 case TARGET_SO_RCVBUF:
1460 optname = SO_RCVBUF;
1461 break;
1462 case TARGET_SO_KEEPALIVE:
1463 optname = SO_KEEPALIVE;
1464 break;
1465 case TARGET_SO_OOBINLINE:
1466 optname = SO_OOBINLINE;
1467 break;
1468 case TARGET_SO_NO_CHECK:
1469 optname = SO_NO_CHECK;
1470 break;
1471 case TARGET_SO_PRIORITY:
1472 optname = SO_PRIORITY;
1473 break;
1474 #ifdef SO_BSDCOMPAT
1475 case TARGET_SO_BSDCOMPAT:
1476 optname = SO_BSDCOMPAT;
1477 break;
1478 #endif
1479 case TARGET_SO_PASSCRED:
1480 optname = SO_PASSCRED;
1481 break;
1482 case TARGET_SO_TIMESTAMP:
1483 optname = SO_TIMESTAMP;
1484 break;
1485 case TARGET_SO_RCVLOWAT:
1486 optname = SO_RCVLOWAT;
1487 break;
1488 case TARGET_SO_RCVTIMEO:
1489 optname = SO_RCVTIMEO;
1490 break;
1491 case TARGET_SO_SNDTIMEO:
1492 optname = SO_SNDTIMEO;
1493 break;
1495 default:
1496 goto unimplemented;
1497 }
1498 if (optlen < sizeof(uint32_t))
1499 return -TARGET_EINVAL;
1500
1501 if (get_user_u32(val, optval_addr))
1502 return -TARGET_EFAULT;
1503 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1504 break;
1505 default:
1506 unimplemented:
1507 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1508 ret = -TARGET_ENOPROTOOPT;
1509 }
1510 return ret;
1511 }
1512
1513 /* do_getsockopt() Must return target values and target errnos. */
1514 static abi_long do_getsockopt(int sockfd, int level, int optname,
1515 abi_ulong optval_addr, abi_ulong optlen)
1516 {
1517 abi_long ret;
1518 int len, val;
1519 socklen_t lv;
1520
1521 switch(level) {
1522 case TARGET_SOL_SOCKET:
1523 level = SOL_SOCKET;
1524 switch (optname) {
1525 /* These don't just return a single integer */
1526 case TARGET_SO_LINGER:
1527 case TARGET_SO_RCVTIMEO:
1528 case TARGET_SO_SNDTIMEO:
1529 case TARGET_SO_PEERCRED:
1530 case TARGET_SO_PEERNAME:
1531 goto unimplemented;
1532 /* Options with 'int' argument. */
1533 case TARGET_SO_DEBUG:
1534 optname = SO_DEBUG;
1535 goto int_case;
1536 case TARGET_SO_REUSEADDR:
1537 optname = SO_REUSEADDR;
1538 goto int_case;
1539 case TARGET_SO_TYPE:
1540 optname = SO_TYPE;
1541 goto int_case;
1542 case TARGET_SO_ERROR:
1543 optname = SO_ERROR;
1544 goto int_case;
1545 case TARGET_SO_DONTROUTE:
1546 optname = SO_DONTROUTE;
1547 goto int_case;
1548 case TARGET_SO_BROADCAST:
1549 optname = SO_BROADCAST;
1550 goto int_case;
1551 case TARGET_SO_SNDBUF:
1552 optname = SO_SNDBUF;
1553 goto int_case;
1554 case TARGET_SO_RCVBUF:
1555 optname = SO_RCVBUF;
1556 goto int_case;
1557 case TARGET_SO_KEEPALIVE:
1558 optname = SO_KEEPALIVE;
1559 goto int_case;
1560 case TARGET_SO_OOBINLINE:
1561 optname = SO_OOBINLINE;
1562 goto int_case;
1563 case TARGET_SO_NO_CHECK:
1564 optname = SO_NO_CHECK;
1565 goto int_case;
1566 case TARGET_SO_PRIORITY:
1567 optname = SO_PRIORITY;
1568 goto int_case;
1569 #ifdef SO_BSDCOMPAT
1570 case TARGET_SO_BSDCOMPAT:
1571 optname = SO_BSDCOMPAT;
1572 goto int_case;
1573 #endif
1574 case TARGET_SO_PASSCRED:
1575 optname = SO_PASSCRED;
1576 goto int_case;
1577 case TARGET_SO_TIMESTAMP:
1578 optname = SO_TIMESTAMP;
1579 goto int_case;
1580 case TARGET_SO_RCVLOWAT:
1581 optname = SO_RCVLOWAT;
1582 goto int_case;
1583 default:
1584 goto int_case;
1585 }
1586 break;
1587 case SOL_TCP:
1588 /* TCP options all take an 'int' value. */
1589 int_case:
1590 if (get_user_u32(len, optlen))
1591 return -TARGET_EFAULT;
1592 if (len < 0)
1593 return -TARGET_EINVAL;
1594 lv = sizeof(lv);
1595 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1596 if (ret < 0)
1597 return ret;
1598 if (len > lv)
1599 len = lv;
1600 if (len == 4) {
1601 if (put_user_u32(val, optval_addr))
1602 return -TARGET_EFAULT;
1603 } else {
1604 if (put_user_u8(val, optval_addr))
1605 return -TARGET_EFAULT;
1606 }
1607 if (put_user_u32(len, optlen))
1608 return -TARGET_EFAULT;
1609 break;
1610 case SOL_IP:
1611 switch(optname) {
1612 case IP_TOS:
1613 case IP_TTL:
1614 case IP_HDRINCL:
1615 case IP_ROUTER_ALERT:
1616 case IP_RECVOPTS:
1617 case IP_RETOPTS:
1618 case IP_PKTINFO:
1619 case IP_MTU_DISCOVER:
1620 case IP_RECVERR:
1621 case IP_RECVTOS:
1622 #ifdef IP_FREEBIND
1623 case IP_FREEBIND:
1624 #endif
1625 case IP_MULTICAST_TTL:
1626 case IP_MULTICAST_LOOP:
1627 if (get_user_u32(len, optlen))
1628 return -TARGET_EFAULT;
1629 if (len < 0)
1630 return -TARGET_EINVAL;
1631 lv = sizeof(lv);
1632 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1633 if (ret < 0)
1634 return ret;
1635 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1636 len = 1;
1637 if (put_user_u32(len, optlen)
1638 || put_user_u8(val, optval_addr))
1639 return -TARGET_EFAULT;
1640 } else {
1641 if (len > sizeof(int))
1642 len = sizeof(int);
1643 if (put_user_u32(len, optlen)
1644 || put_user_u32(val, optval_addr))
1645 return -TARGET_EFAULT;
1646 }
1647 break;
1648 default:
1649 ret = -TARGET_ENOPROTOOPT;
1650 break;
1651 }
1652 break;
1653 default:
1654 unimplemented:
1655 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1656 level, optname);
1657 ret = -TARGET_EOPNOTSUPP;
1658 break;
1659 }
1660 return ret;
1661 }
1662
1663 /* FIXME
1664 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1665 * other lock functions have a return code of 0 for failure.
1666 */
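/* Translate a target iovec array into host iovec entries, locking each
 * buffer into host memory; unlock_iovec() releases the buffers again,
 * copying data back to the target when 'copy' is set. */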
1667 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1668 int count, int copy)
1669 {
1670 struct target_iovec *target_vec;
1671 abi_ulong base;
1672 int i;
1673
1674 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1675 if (!target_vec)
1676 return -TARGET_EFAULT;
1677 for(i = 0;i < count; i++) {
1678 base = tswapl(target_vec[i].iov_base);
1679 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1680 if (vec[i].iov_len != 0) {
1681 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1682             /* Don't check the lock_user return value. We must call writev even
1683                if an element has an invalid base address. */
1684 } else {
1685 /* zero length pointer is ignored */
1686 vec[i].iov_base = NULL;
1687 }
1688 }
1689 unlock_user (target_vec, target_addr, 0);
1690 return 0;
1691 }
1692
1693 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1694 int count, int copy)
1695 {
1696 struct target_iovec *target_vec;
1697 abi_ulong base;
1698 int i;
1699
1700 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1701 if (!target_vec)
1702 return -TARGET_EFAULT;
1703 for(i = 0;i < count; i++) {
1704 if (target_vec[i].iov_base) {
1705 base = tswapl(target_vec[i].iov_base);
1706 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1707 }
1708 }
1709 unlock_user (target_vec, target_addr, 0);
1710
1711 return 0;
1712 }
1713
1714 /* do_socket() Must return target values and target errnos. */
1715 static abi_long do_socket(int domain, int type, int protocol)
1716 {
1717 #if defined(TARGET_MIPS)
1718 switch(type) {
1719 case TARGET_SOCK_DGRAM:
1720 type = SOCK_DGRAM;
1721 break;
1722 case TARGET_SOCK_STREAM:
1723 type = SOCK_STREAM;
1724 break;
1725 case TARGET_SOCK_RAW:
1726 type = SOCK_RAW;
1727 break;
1728 case TARGET_SOCK_RDM:
1729 type = SOCK_RDM;
1730 break;
1731 case TARGET_SOCK_SEQPACKET:
1732 type = SOCK_SEQPACKET;
1733 break;
1734 case TARGET_SOCK_PACKET:
1735 type = SOCK_PACKET;
1736 break;
1737 }
1738 #endif
1739 if (domain == PF_NETLINK)
1740         return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1741 return get_errno(socket(domain, type, protocol));
1742 }
1743
1744 /* do_bind() Must return target values and target errnos. */
1745 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1746 socklen_t addrlen)
1747 {
1748 void *addr;
1749 abi_long ret;
1750
1751 if ((int)addrlen < 0) {
1752 return -TARGET_EINVAL;
1753 }
1754
1755 addr = alloca(addrlen+1);
1756
1757 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1758 if (ret)
1759 return ret;
1760
1761 return get_errno(bind(sockfd, addr, addrlen));
1762 }
1763
1764 /* do_connect() Must return target values and target errnos. */
1765 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1766 socklen_t addrlen)
1767 {
1768 void *addr;
1769 abi_long ret;
1770
1771 if ((int)addrlen < 0) {
1772 return -TARGET_EINVAL;
1773 }
1774
1775 addr = alloca(addrlen);
1776
1777 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1778 if (ret)
1779 return ret;
1780
1781 return get_errno(connect(sockfd, addr, addrlen));
1782 }
1783
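/* Builds a host msghdr from the target msghdr (socket address, iovec array
 * and control data) and dispatches to sendmsg() or recvmsg() depending on
 * 'send', converting the results back afterwards. */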
1784 /* do_sendrecvmsg() Must return target values and target errnos. */
1785 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1786 int flags, int send)
1787 {
1788 abi_long ret, len;
1789 struct target_msghdr *msgp;
1790 struct msghdr msg;
1791 int count;
1792 struct iovec *vec;
1793 abi_ulong target_vec;
1794
1795 /* FIXME */
1796 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1797 msgp,
1798 target_msg,
1799 send ? 1 : 0))
1800 return -TARGET_EFAULT;
1801 if (msgp->msg_name) {
1802 msg.msg_namelen = tswap32(msgp->msg_namelen);
1803 msg.msg_name = alloca(msg.msg_namelen);
1804 ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1805 msg.msg_namelen);
1806 if (ret) {
1807 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1808 return ret;
1809 }
1810 } else {
1811 msg.msg_name = NULL;
1812 msg.msg_namelen = 0;
1813 }
1814 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1815 msg.msg_control = alloca(msg.msg_controllen);
1816 msg.msg_flags = tswap32(msgp->msg_flags);
1817
1818 count = tswapl(msgp->msg_iovlen);
1819 vec = alloca(count * sizeof(struct iovec));
1820 target_vec = tswapl(msgp->msg_iov);
1821 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1822 msg.msg_iovlen = count;
1823 msg.msg_iov = vec;
1824
1825 if (send) {
1826 ret = target_to_host_cmsg(&msg, msgp);
1827 if (ret == 0)
1828 ret = get_errno(sendmsg(fd, &msg, flags));
1829 } else {
1830 ret = get_errno(recvmsg(fd, &msg, flags));
1831 if (!is_error(ret)) {
1832 len = ret;
1833 ret = host_to_target_cmsg(msgp, &msg);
1834 if (!is_error(ret))
1835 ret = len;
1836 }
1837 }
1838 unlock_iovec(vec, target_vec, count, !send);
1839 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1840 return ret;
1841 }
1842
1843 /* do_accept() Must return target values and target errnos. */
1844 static abi_long do_accept(int fd, abi_ulong target_addr,
1845 abi_ulong target_addrlen_addr)
1846 {
1847 socklen_t addrlen;
1848 void *addr;
1849 abi_long ret;
1850
1851 if (target_addr == 0)
1852 return get_errno(accept(fd, NULL, NULL));
1853
1854     /* Linux returns EINVAL if the addrlen pointer is invalid */
1855 if (get_user_u32(addrlen, target_addrlen_addr))
1856 return -TARGET_EINVAL;
1857
1858 if ((int)addrlen < 0) {
1859 return -TARGET_EINVAL;
1860 }
1861
1862 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1863 return -TARGET_EINVAL;
1864
1865 addr = alloca(addrlen);
1866
1867 ret = get_errno(accept(fd, addr, &addrlen));
1868 if (!is_error(ret)) {
1869 host_to_target_sockaddr(target_addr, addr, addrlen);
1870 if (put_user_u32(addrlen, target_addrlen_addr))
1871 ret = -TARGET_EFAULT;
1872 }
1873 return ret;
1874 }
1875
1876 /* do_getpeername() Must return target values and target errnos. */
1877 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1878 abi_ulong target_addrlen_addr)
1879 {
1880 socklen_t addrlen;
1881 void *addr;
1882 abi_long ret;
1883
1884 if (get_user_u32(addrlen, target_addrlen_addr))
1885 return -TARGET_EFAULT;
1886
1887 if ((int)addrlen < 0) {
1888 return -TARGET_EINVAL;
1889 }
1890
1891 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1892 return -TARGET_EFAULT;
1893
1894 addr = alloca(addrlen);
1895
1896 ret = get_errno(getpeername(fd, addr, &addrlen));
1897 if (!is_error(ret)) {
1898 host_to_target_sockaddr(target_addr, addr, addrlen);
1899 if (put_user_u32(addrlen, target_addrlen_addr))
1900 ret = -TARGET_EFAULT;
1901 }
1902 return ret;
1903 }
1904
1905 /* do_getsockname() Must return target values and target errnos. */
1906 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1907 abi_ulong target_addrlen_addr)
1908 {
1909 socklen_t addrlen;
1910 void *addr;
1911 abi_long ret;
1912
1913 if (get_user_u32(addrlen, target_addrlen_addr))
1914 return -TARGET_EFAULT;
1915
1916 if ((int)addrlen < 0) {
1917 return -TARGET_EINVAL;
1918 }
1919
1920 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1921 return -TARGET_EFAULT;
1922
1923 addr = alloca(addrlen);
1924
1925 ret = get_errno(getsockname(fd, addr, &addrlen));
1926 if (!is_error(ret)) {
1927 host_to_target_sockaddr(target_addr, addr, addrlen);
1928 if (put_user_u32(addrlen, target_addrlen_addr))
1929 ret = -TARGET_EFAULT;
1930 }
1931 return ret;
1932 }
1933
1934 /* do_socketpair() Must return target values and target errnos. */
1935 static abi_long do_socketpair(int domain, int type, int protocol,
1936 abi_ulong target_tab_addr)
1937 {
1938 int tab[2];
1939 abi_long ret;
1940
1941 ret = get_errno(socketpair(domain, type, protocol, tab));
1942 if (!is_error(ret)) {
1943 if (put_user_s32(tab[0], target_tab_addr)
1944 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1945 ret = -TARGET_EFAULT;
1946 }
1947 return ret;
1948 }
1949
1950 /* do_sendto() Must return target values and target errnos. */
1951 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1952 abi_ulong target_addr, socklen_t addrlen)
1953 {
1954 void *addr;
1955 void *host_msg;
1956 abi_long ret;
1957
1958 if ((int)addrlen < 0) {
1959 return -TARGET_EINVAL;
1960 }
1961
1962 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1963 if (!host_msg)
1964 return -TARGET_EFAULT;
1965 if (target_addr) {
1966 addr = alloca(addrlen);
1967 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1968 if (ret) {
1969 unlock_user(host_msg, msg, 0);
1970 return ret;
1971 }
1972 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1973 } else {
1974 ret = get_errno(send(fd, host_msg, len, flags));
1975 }
1976 unlock_user(host_msg, msg, 0);
1977 return ret;
1978 }
1979
1980 /* do_recvfrom() Must return target values and target errnos. */
1981 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1982 abi_ulong target_addr,
1983 abi_ulong target_addrlen)
1984 {
1985 socklen_t addrlen;
1986 void *addr;
1987 void *host_msg;
1988 abi_long ret;
1989
1990 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1991 if (!host_msg)
1992 return -TARGET_EFAULT;
1993 if (target_addr) {
1994 if (get_user_u32(addrlen, target_addrlen)) {
1995 ret = -TARGET_EFAULT;
1996 goto fail;
1997 }
1998 if ((int)addrlen < 0) {
1999 ret = -TARGET_EINVAL;
2000 goto fail;
2001 }
2002 addr = alloca(addrlen);
2003 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2004 } else {
2005 addr = NULL; /* To keep compiler quiet. */
2006 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2007 }
2008 if (!is_error(ret)) {
2009 if (target_addr) {
2010 host_to_target_sockaddr(target_addr, addr, addrlen);
2011 if (put_user_u32(addrlen, target_addrlen)) {
2012 ret = -TARGET_EFAULT;
2013 goto fail;
2014 }
2015 }
2016 unlock_user(host_msg, msg, len);
2017 } else {
2018 fail:
2019 unlock_user(host_msg, msg, 0);
2020 }
2021 return ret;
2022 }
2023
2024 #ifdef TARGET_NR_socketcall
2025 /* do_socketcall() Must return target values and target errnos. */
2026 static abi_long do_socketcall(int num, abi_ulong vptr)
2027 {
2028 abi_long ret;
2029 const int n = sizeof(abi_ulong);
2030
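/* vptr points to an array of abi_ulong arguments in target memory;
 * each SOCKOP_* case below pulls its arguments from consecutive
 * n-byte slots, mirroring how the kernel's socketcall(2) unpacks its
 * argument block. */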
2031 switch(num) {
2032 case SOCKOP_socket:
2033 {
2034 abi_ulong domain, type, protocol;
2035
2036 if (get_user_ual(domain, vptr)
2037 || get_user_ual(type, vptr + n)
2038 || get_user_ual(protocol, vptr + 2 * n))
2039 return -TARGET_EFAULT;
2040
2041 ret = do_socket(domain, type, protocol);
2042 }
2043 break;
2044 case SOCKOP_bind:
2045 {
2046 abi_ulong sockfd;
2047 abi_ulong target_addr;
2048 socklen_t addrlen;
2049
2050 if (get_user_ual(sockfd, vptr)
2051 || get_user_ual(target_addr, vptr + n)
2052 || get_user_ual(addrlen, vptr + 2 * n))
2053 return -TARGET_EFAULT;
2054
2055 ret = do_bind(sockfd, target_addr, addrlen);
2056 }
2057 break;
2058 case SOCKOP_connect:
2059 {
2060 abi_ulong sockfd;
2061 abi_ulong target_addr;
2062 socklen_t addrlen;
2063
2064 if (get_user_ual(sockfd, vptr)
2065 || get_user_ual(target_addr, vptr + n)
2066 || get_user_ual(addrlen, vptr + 2 * n))
2067 return -TARGET_EFAULT;
2068
2069 ret = do_connect(sockfd, target_addr, addrlen);
2070 }
2071 break;
2072 case SOCKOP_listen:
2073 {
2074 abi_ulong sockfd, backlog;
2075
2076 if (get_user_ual(sockfd, vptr)
2077 || get_user_ual(backlog, vptr + n))
2078 return -TARGET_EFAULT;
2079
2080 ret = get_errno(listen(sockfd, backlog));
2081 }
2082 break;
2083 case SOCKOP_accept:
2084 {
2085 abi_ulong sockfd;
2086 abi_ulong target_addr, target_addrlen;
2087
2088 if (get_user_ual(sockfd, vptr)
2089 || get_user_ual(target_addr, vptr + n)
2090 || get_user_ual(target_addrlen, vptr + 2 * n))
2091 return -TARGET_EFAULT;
2092
2093 ret = do_accept(sockfd, target_addr, target_addrlen);
2094 }
2095 break;
2096 case SOCKOP_getsockname:
2097 {
2098 abi_ulong sockfd;
2099 abi_ulong target_addr, target_addrlen;
2100
2101 if (get_user_ual(sockfd, vptr)
2102 || get_user_ual(target_addr, vptr + n)
2103 || get_user_ual(target_addrlen, vptr + 2 * n))
2104 return -TARGET_EFAULT;
2105
2106 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2107 }
2108 break;
2109 case SOCKOP_getpeername:
2110 {
2111 abi_ulong sockfd;
2112 abi_ulong target_addr, target_addrlen;
2113
2114 if (get_user_ual(sockfd, vptr)
2115 || get_user_ual(target_addr, vptr + n)
2116 || get_user_ual(target_addrlen, vptr + 2 * n))
2117 return -TARGET_EFAULT;
2118
2119 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2120 }
2121 break;
2122 case SOCKOP_socketpair:
2123 {
2124 abi_ulong domain, type, protocol;
2125 abi_ulong tab;
2126
2127 if (get_user_ual(domain, vptr)
2128 || get_user_ual(type, vptr + n)
2129 || get_user_ual(protocol, vptr + 2 * n)
2130 || get_user_ual(tab, vptr + 3 * n))
2131 return -TARGET_EFAULT;
2132
2133 ret = do_socketpair(domain, type, protocol, tab);
2134 }
2135 break;
2136 case SOCKOP_send:
2137 {
2138 abi_ulong sockfd;
2139 abi_ulong msg;
2140 size_t len;
2141 abi_ulong flags;
2142
2143 if (get_user_ual(sockfd, vptr)
2144 || get_user_ual(msg, vptr + n)
2145 || get_user_ual(len, vptr + 2 * n)
2146 || get_user_ual(flags, vptr + 3 * n))
2147 return -TARGET_EFAULT;
2148
2149 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2150 }
2151 break;
2152 case SOCKOP_recv:
2153 {
2154 abi_ulong sockfd;
2155 abi_ulong msg;
2156 size_t len;
2157 abi_ulong flags;
2158
2159 if (get_user_ual(sockfd, vptr)
2160 || get_user_ual(msg, vptr + n)
2161 || get_user_ual(len, vptr + 2 * n)
2162 || get_user_ual(flags, vptr + 3 * n))
2163 return -TARGET_EFAULT;
2164
2165 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2166 }
2167 break;
2168 case SOCKOP_sendto:
2169 {
2170 abi_ulong sockfd;
2171 abi_ulong msg;
2172 size_t len;
2173 abi_ulong flags;
2174 abi_ulong addr;
2175 socklen_t addrlen;
2176
2177 if (get_user_ual(sockfd, vptr)
2178 || get_user_ual(msg, vptr + n)
2179 || get_user_ual(len, vptr + 2 * n)
2180 || get_user_ual(flags, vptr + 3 * n)
2181 || get_user_ual(addr, vptr + 4 * n)
2182 || get_user_ual(addrlen, vptr + 5 * n))
2183 return -TARGET_EFAULT;
2184
2185 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2186 }
2187 break;
2188 case SOCKOP_recvfrom:
2189 {
2190 abi_ulong sockfd;
2191 abi_ulong msg;
2192 size_t len;
2193 abi_ulong flags;
2194 abi_ulong addr;
2195 socklen_t addrlen;
2196
2197 if (get_user_ual(sockfd, vptr)
2198 || get_user_ual(msg, vptr + n)
2199 || get_user_ual(len, vptr + 2 * n)
2200 || get_user_ual(flags, vptr + 3 * n)
2201 || get_user_ual(addr, vptr + 4 * n)
2202 || get_user_ual(addrlen, vptr + 5 * n))
2203 return -TARGET_EFAULT;
2204
2205 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2206 }
2207 break;
2208 case SOCKOP_shutdown:
2209 {
2210 abi_ulong sockfd, how;
2211
2212 if (get_user_ual(sockfd, vptr)
2213 || get_user_ual(how, vptr + n))
2214 return -TARGET_EFAULT;
2215
2216 ret = get_errno(shutdown(sockfd, how));
2217 }
2218 break;
2219 case SOCKOP_sendmsg:
2220 case SOCKOP_recvmsg:
2221 {
2222 abi_ulong fd;
2223 abi_ulong target_msg;
2224 abi_ulong flags;
2225
2226 if (get_user_ual(fd, vptr)
2227 || get_user_ual(target_msg, vptr + n)
2228 || get_user_ual(flags, vptr + 2 * n))
2229 return -TARGET_EFAULT;
2230
2231 ret = do_sendrecvmsg(fd, target_msg, flags,
2232 (num == SOCKOP_sendmsg));
2233 }
2234 break;
2235 case SOCKOP_setsockopt:
2236 {
2237 abi_ulong sockfd;
2238 abi_ulong level;
2239 abi_ulong optname;
2240 abi_ulong optval;
2241 socklen_t optlen;
2242
2243 if (get_user_ual(sockfd, vptr)
2244 || get_user_ual(level, vptr + n)
2245 || get_user_ual(optname, vptr + 2 * n)
2246 || get_user_ual(optval, vptr + 3 * n)
2247 || get_user_ual(optlen, vptr + 4 * n))
2248 return -TARGET_EFAULT;
2249
2250 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2251 }
2252 break;
2253 case SOCKOP_getsockopt:
2254 {
2255 abi_ulong sockfd;
2256 abi_ulong level;
2257 abi_ulong optname;
2258 abi_ulong optval;
2259 socklen_t optlen;
2260
2261 if (get_user_ual(sockfd, vptr)
2262 || get_user_ual(level, vptr + n)
2263 || get_user_ual(optname, vptr + 2 * n)
2264 || get_user_ual(optval, vptr + 3 * n)
2265 || get_user_ual(optlen, vptr + 4 * n))
2266 return -TARGET_EFAULT;
2267
2268 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2269 }
2270 break;
2271 default:
2272 gemu_log("Unsupported socketcall: %d\n", num);
2273 ret = -TARGET_ENOSYS;
2274 break;
2275 }
2276 return ret;
2277 }
2278 #endif
2279
2280 #define N_SHM_REGIONS 32
2281
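/* Track the guest addresses and sizes of attached SysV shared memory
 * segments so that do_shmdt() can clear the corresponding page flags
 * when a segment is detached. */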
2282 static struct shm_region {
2283 abi_ulong start;
2284 abi_ulong size;
2285 } shm_regions[N_SHM_REGIONS];
2286
2287 struct target_ipc_perm
2288 {
2289 abi_long __key;
2290 abi_ulong uid;
2291 abi_ulong gid;
2292 abi_ulong cuid;
2293 abi_ulong cgid;
2294 unsigned short int mode;
2295 unsigned short int __pad1;
2296 unsigned short int __seq;
2297 unsigned short int __pad2;
2298 abi_ulong __unused1;
2299 abi_ulong __unused2;
2300 };
2301
2302 struct target_semid_ds
2303 {
2304 struct target_ipc_perm sem_perm;
2305 abi_ulong sem_otime;
2306 abi_ulong __unused1;
2307 abi_ulong sem_ctime;
2308 abi_ulong __unused2;
2309 abi_ulong sem_nsems;
2310 abi_ulong __unused3;
2311 abi_ulong __unused4;
2312 };
2313
2314 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2315 abi_ulong target_addr)
2316 {
2317 struct target_ipc_perm *target_ip;
2318 struct target_semid_ds *target_sd;
2319
2320 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2321 return -TARGET_EFAULT;
2322 target_ip = &(target_sd->sem_perm);
2323 host_ip->__key = tswapl(target_ip->__key);
2324 host_ip->uid = tswapl(target_ip->uid);
2325 host_ip->gid = tswapl(target_ip->gid);
2326 host_ip->cuid = tswapl(target_ip->cuid);
2327 host_ip->cgid = tswapl(target_ip->cgid);
2328 host_ip->mode = tswapl(target_ip->mode);
2329 unlock_user_struct(target_sd, target_addr, 0);
2330 return 0;
2331 }
2332
2333 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2334 struct ipc_perm *host_ip)
2335 {
2336 struct target_ipc_perm *target_ip;
2337 struct target_semid_ds *target_sd;
2338
2339 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2340 return -TARGET_EFAULT;
2341 target_ip = &(target_sd->sem_perm);
2342 target_ip->__key = tswapl(host_ip->__key);
2343 target_ip->uid = tswapl(host_ip->uid);
2344 target_ip->gid = tswapl(host_ip->gid);
2345 target_ip->cuid = tswapl(host_ip->cuid);
2346 target_ip->cgid = tswapl(host_ip->cgid);
2347 target_ip->mode = tswapl(host_ip->mode);
2348 unlock_user_struct(target_sd, target_addr, 1);
2349 return 0;
2350 }
2351
2352 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2353 abi_ulong target_addr)
2354 {
2355 struct target_semid_ds *target_sd;
2356
2357 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2358 return -TARGET_EFAULT;
2359 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2360 return -TARGET_EFAULT;
2361 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2362 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2363 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2364 unlock_user_struct(target_sd, target_addr, 0);
2365 return 0;
2366 }
2367
2368 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2369 struct semid_ds *host_sd)
2370 {
2371 struct target_semid_ds *target_sd;
2372
2373 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2374 return -TARGET_EFAULT;
2375 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2376 return -TARGET_EFAULT;
2377 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2378 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2379 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2380 unlock_user_struct(target_sd, target_addr, 1);
2381 return 0;
2382 }
2383
2384 struct target_seminfo {
2385 int semmap;
2386 int semmni;
2387 int semmns;
2388 int semmnu;
2389 int semmsl;
2390 int semopm;
2391 int semume;
2392 int semusz;
2393 int semvmx;
2394 int semaem;
2395 };
2396
2397 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2398 struct seminfo *host_seminfo)
2399 {
2400 struct target_seminfo *target_seminfo;
2401 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2402 return -TARGET_EFAULT;
2403 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2404 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2405 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2406 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2407 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2408 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2409 __put_user(host_seminfo->semume, &target_seminfo->semume);
2410 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2411 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2412 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2413 unlock_user_struct(target_seminfo, target_addr, 1);
2414 return 0;
2415 }
2416
2417 union semun {
2418 int val;
2419 struct semid_ds *buf;
2420 unsigned short *array;
2421 struct seminfo *__buf;
2422 };
2423
2424 union target_semun {
2425 int val;
2426 abi_ulong buf;
2427 abi_ulong array;
2428 abi_ulong __buf;
2429 };
2430
2431 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2432 abi_ulong target_addr)
2433 {
2434 int nsems;
2435 unsigned short *array;
2436 union semun semun;
2437 struct semid_ds semid_ds;
2438 int i, ret;
2439
2440 semun.buf = &semid_ds;
2441
2442 ret = semctl(semid, 0, IPC_STAT, semun);
2443 if (ret == -1)
2444 return get_errno(ret);
2445
2446 nsems = semid_ds.sem_nsems;
2447
2448 *host_array = malloc(nsems*sizeof(unsigned short));
2449 array = lock_user(VERIFY_READ, target_addr,
2450 nsems*sizeof(unsigned short), 1);
2451 if (!array)
2452 return -TARGET_EFAULT;
2453
2454 for(i=0; i<nsems; i++) {
2455 __get_user((*host_array)[i], &array[i]);
2456 }
2457 unlock_user(array, target_addr, 0);
2458
2459 return 0;
2460 }
2461
2462 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2463 unsigned short **host_array)
2464 {
2465 int nsems;
2466 unsigned short *array;
2467 union semun semun;
2468 struct semid_ds semid_ds;
2469 int i, ret;
2470
2471 semun.buf = &semid_ds;
2472
2473 ret = semctl(semid, 0, IPC_STAT, semun);
2474 if (ret == -1)
2475 return get_errno(ret);
2476
2477 nsems = semid_ds.sem_nsems;
2478
2479 array = lock_user(VERIFY_WRITE, target_addr,
2480 nsems*sizeof(unsigned short), 0);
2481 if (!array)
2482 return -TARGET_EFAULT;
2483
2484 for(i=0; i<nsems; i++) {
2485 __put_user((*host_array)[i], &array[i]);
2486 }
2487 free(*host_array);
2488 unlock_user(array, target_addr, 1);
2489
2490 return 0;
2491 }
2492
2493 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2494 union target_semun target_su)
2495 {
2496 union semun arg;
2497 struct semid_ds dsarg;
2498 unsigned short *array = NULL;
2499 struct seminfo seminfo;
2500 abi_long ret = -TARGET_EINVAL;
2501 abi_long err;
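/* Mask off the high command bits (such as the IPC_64 flag some
 * targets OR into the command) so a plain command value is passed to
 * the host semctl(); this assumes host and target agree on the low
 * command numbers. */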
2502 cmd &= 0xff;
2503
2504 switch(cmd) {
2505 case GETVAL:
2506 case SETVAL:
2507 arg.val = tswapl(target_su.val);
2508 ret = get_errno(semctl(semid, semnum, cmd, arg));
2509 target_su.val = tswapl(arg.val);
2510 break;
2511 case GETALL:
2512 case SETALL:
2513 err = target_to_host_semarray(semid, &array, target_su.array);
2514 if (err)
2515 return err;
2516 arg.array = array;
2517 ret = get_errno(semctl(semid, semnum, cmd, arg));
2518 err = host_to_target_semarray(semid, target_su.array, &array);
2519 if (err)
2520 return err;
2521 break;
2522 case IPC_STAT:
2523 case IPC_SET:
2524 case SEM_STAT:
2525 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2526 if (err)
2527 return err;
2528 arg.buf = &dsarg;
2529 ret = get_errno(semctl(semid, semnum, cmd, arg));
2530 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2531 if (err)
2532 return err;
2533 break;
2534 case IPC_INFO:
2535 case SEM_INFO:
2536 arg.__buf = &seminfo;
2537 ret = get_errno(semctl(semid, semnum, cmd, arg));
2538 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2539 if (err)
2540 return err;
2541 break;
2542 case IPC_RMID:
2543 case GETPID:
2544 case GETNCNT:
2545 case GETZCNT:
2546 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2547 break;
2548 }
2549
2550 return ret;
2551 }
2552
2553 struct target_sembuf {
2554 unsigned short sem_num;
2555 short sem_op;
2556 short sem_flg;
2557 };
2558
2559 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2560 abi_ulong target_addr,
2561 unsigned nsops)
2562 {
2563 struct target_sembuf *target_sembuf;
2564 int i;
2565
2566 target_sembuf = lock_user(VERIFY_READ, target_addr,
2567 nsops*sizeof(struct target_sembuf), 1);
2568 if (!target_sembuf)
2569 return -TARGET_EFAULT;
2570
2571 for(i=0; i<nsops; i++) {
2572 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2573 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2574 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2575 }
2576
2577 unlock_user(target_sembuf, target_addr, 0);
2578
2579 return 0;
2580 }
2581
2582 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2583 {
2584 struct sembuf sops[nsops];
2585
2586 if (target_to_host_sembuf(sops, ptr, nsops))
2587 return -TARGET_EFAULT;
2588
2589 return get_errno(semop(semid, sops, nsops));
2590 }
2591
2592 struct target_msqid_ds
2593 {
2594 struct target_ipc_perm msg_perm;
2595 abi_ulong msg_stime;
2596 #if TARGET_ABI_BITS == 32
2597 abi_ulong __unused1;
2598 #endif
2599 abi_ulong msg_rtime;
2600 #if TARGET_ABI_BITS == 32
2601 abi_ulong __unused2;
2602 #endif
2603 abi_ulong msg_ctime;
2604 #if TARGET_ABI_BITS == 32
2605 abi_ulong __unused3;
2606 #endif
2607 abi_ulong __msg_cbytes;
2608 abi_ulong msg_qnum;
2609 abi_ulong msg_qbytes;
2610 abi_ulong msg_lspid;
2611 abi_ulong msg_lrpid;
2612 abi_ulong __unused4;
2613 abi_ulong __unused5;
2614 };
2615
2616 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2617 abi_ulong target_addr)
2618 {
2619 struct target_msqid_ds *target_md;
2620
2621 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2622 return -TARGET_EFAULT;
2623 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2624 return -TARGET_EFAULT;
2625 host_md->msg_stime = tswapl(target_md->msg_stime);
2626 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2627 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2628 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2629 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2630 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2631 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2632 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2633 unlock_user_struct(target_md, target_addr, 0);
2634 return 0;
2635 }
2636
2637 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2638 struct msqid_ds *host_md)
2639 {
2640 struct target_msqid_ds *target_md;
2641
2642 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2643 return -TARGET_EFAULT;
2644 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2645 return -TARGET_EFAULT;
2646 target_md->msg_stime = tswapl(host_md->msg_stime);
2647 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2648 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2649 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2650 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2651 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2652 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2653 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2654 unlock_user_struct(target_md, target_addr, 1);
2655 return 0;
2656 }
2657
2658 struct target_msginfo {
2659 int msgpool;
2660 int msgmap;
2661 int msgmax;
2662 int msgmnb;
2663 int msgmni;
2664 int msgssz;
2665 int msgtql;
2666 unsigned short int msgseg;
2667 };
2668
2669 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2670 struct msginfo *host_msginfo)
2671 {
2672 struct target_msginfo *target_msginfo;
2673 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2674 return -TARGET_EFAULT;
2675 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2676 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2677 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2678 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2679 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2680 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2681 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2682 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2683 unlock_user_struct(target_msginfo, target_addr, 1);
2684 return 0;
2685 }
2686
2687 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2688 {
2689 struct msqid_ds dsarg;
2690 struct msginfo msginfo;
2691 abi_long ret = -TARGET_EINVAL;
2692
2693 cmd &= 0xff;
2694
2695 switch (cmd) {
2696 case IPC_STAT:
2697 case IPC_SET:
2698 case MSG_STAT:
2699 if (target_to_host_msqid_ds(&dsarg,ptr))
2700 return -TARGET_EFAULT;
2701 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2702 if (host_to_target_msqid_ds(ptr,&dsarg))
2703 return -TARGET_EFAULT;
2704 break;
2705 case IPC_RMID:
2706 ret = get_errno(msgctl(msgid, cmd, NULL));
2707 break;
2708 case IPC_INFO:
2709 case MSG_INFO:
2710 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2711 if (host_to_target_msginfo(ptr, &msginfo))
2712 return -TARGET_EFAULT;
2713 break;
2714 }
2715
2716 return ret;
2717 }
2718
2719 struct target_msgbuf {
2720 abi_long mtype;
2721 char mtext[1];
2722 };
2723
2724 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2725 unsigned int msgsz, int msgflg)
2726 {
2727 struct target_msgbuf *target_mb;
2728 struct msgbuf *host_mb;
2729 abi_long ret = 0;
2730
2731 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2732 return -TARGET_EFAULT;
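/* struct msgbuf is a long mtype followed by the message text, so the
 * host copy needs sizeof(long) + msgsz bytes. */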
2733 host_mb = malloc(msgsz+sizeof(long));
2734 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2735 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2736 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2737 free(host_mb);
2738 unlock_user_struct(target_mb, msgp, 0);
2739
2740 return ret;
2741 }
2742
2743 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2744 unsigned int msgsz, abi_long msgtyp,
2745 int msgflg)
2746 {
2747 struct target_msgbuf *target_mb;
2748 char *target_mtext;
2749 struct msgbuf *host_mb;
2750 abi_long ret = 0;
2751
2752 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2753 return -TARGET_EFAULT;
2754
2755 host_mb = malloc(msgsz+sizeof(long));
2756 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2757
2758 if (ret > 0) {
2759 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2760 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2761 if (!target_mtext) {
2762 ret = -TARGET_EFAULT;
2763 goto end;
2764 }
2765 memcpy(target_mb->mtext, host_mb->mtext, ret);
2766 unlock_user(target_mtext, target_mtext_addr, ret);
2767 }
2768
2769 target_mb->mtype = tswapl(host_mb->mtype);
2770 free(host_mb);
2771
2772 end:
2773 if (target_mb)
2774 unlock_user_struct(target_mb, msgp, 1);
2775 return ret;
2776 }
2777
2778 struct target_shmid_ds
2779 {
2780 struct target_ipc_perm shm_perm;
2781 abi_ulong shm_segsz;
2782 abi_ulong shm_atime;
2783 #if TARGET_ABI_BITS == 32
2784 abi_ulong __unused1;
2785 #endif
2786 abi_ulong shm_dtime;
2787 #if TARGET_ABI_BITS == 32
2788 abi_ulong __unused2;
2789 #endif
2790 abi_ulong shm_ctime;
2791 #if TARGET_ABI_BITS == 32
2792 abi_ulong __unused3;
2793 #endif
2794 int shm_cpid;
2795 int shm_lpid;
2796 abi_ulong shm_nattch;
2797 unsigned long int __unused4;
2798 unsigned long int __unused5;
2799 };
2800
2801 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2802 abi_ulong target_addr)
2803 {
2804 struct target_shmid_ds *target_sd;
2805
2806 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2807 return -TARGET_EFAULT;
2808 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2809 return -TARGET_EFAULT;
2810 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2811 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2812 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2813 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2814 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2815 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2816 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2817 unlock_user_struct(target_sd, target_addr, 0);
2818 return 0;
2819 }
2820
2821 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2822 struct shmid_ds *host_sd)
2823 {
2824 struct target_shmid_ds *target_sd;
2825
2826 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2827 return -TARGET_EFAULT;
2828 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2829 return -TARGET_EFAULT;
2830 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2831 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2832 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2833 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2834 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2835 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2836 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2837 unlock_user_struct(target_sd, target_addr, 1);
2838 return 0;
2839 }
2840
2841 struct target_shminfo {
2842 abi_ulong shmmax;
2843 abi_ulong shmmin;
2844 abi_ulong shmmni;
2845 abi_ulong shmseg;
2846 abi_ulong shmall;
2847 };
2848
2849 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2850 struct shminfo *host_shminfo)
2851 {
2852 struct target_shminfo *target_shminfo;
2853 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2854 return -TARGET_EFAULT;
2855 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2856 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2857 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2858 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2859 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2860 unlock_user_struct(target_shminfo, target_addr, 1);
2861 return 0;
2862 }
2863
2864 struct target_shm_info {
2865 int used_ids;
2866 abi_ulong shm_tot;
2867 abi_ulong shm_rss;
2868 abi_ulong shm_swp;
2869 abi_ulong swap_attempts;
2870 abi_ulong swap_successes;
2871 };
2872
2873 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2874 struct shm_info *host_shm_info)
2875 {
2876 struct target_shm_info *target_shm_info;
2877 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2878 return -TARGET_EFAULT;
2879 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2880 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2881 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2882 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2883 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2884 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2885 unlock_user_struct(target_shm_info, target_addr, 1);
2886 return 0;
2887 }
2888
2889 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2890 {
2891 struct shmid_ds dsarg;
2892 struct shminfo shminfo;
2893 struct shm_info shm_info;
2894 abi_long ret = -TARGET_EINVAL;
2895
2896 cmd &= 0xff;
2897
2898 switch(cmd) {
2899 case IPC_STAT:
2900 case IPC_SET:
2901 case SHM_STAT:
2902 if (target_to_host_shmid_ds(&dsarg, buf))
2903 return -TARGET_EFAULT;
2904 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2905 if (host_to_target_shmid_ds(buf, &dsarg))
2906 return -TARGET_EFAULT;
2907 break;
2908 case IPC_INFO:
2909 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2910 if (host_to_target_shminfo(buf, &shminfo))
2911 return -TARGET_EFAULT;
2912 break;
2913 case SHM_INFO:
2914 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2915 if (host_to_target_shm_info(buf, &shm_info))
2916 return -TARGET_EFAULT;
2917 break;
2918 case IPC_RMID:
2919 case SHM_LOCK:
2920 case SHM_UNLOCK:
2921 ret = get_errno(shmctl(shmid, cmd, NULL));
2922 break;
2923 }
2924
2925 return ret;
2926 }
2927
2928 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2929 {
2930 abi_long raddr;
2931 void *host_raddr;
2932 struct shmid_ds shm_info;
2933 int i,ret;
2934
2935 /* find out the length of the shared memory segment */
2936 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2937 if (is_error(ret)) {
2938 /* can't get length, bail out */
2939 return ret;
2940 }
2941
2942 mmap_lock();
2943
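/* If the guest supplied an address, attach there directly (translated
 * with g2h()). Otherwise pick a free region of guest address space
 * with mmap_find_vma() and attach there with SHM_REMAP, which lets
 * shmat() replace whatever host mapping already occupies that range;
 * letting the host choose could yield an address that is not
 * representable in the guest's address space. */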
2944 if (shmaddr)
2945 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2946 else {
2947 abi_ulong mmap_start;
2948
2949 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2950
2951 if (mmap_start == -1) {
2952 errno = ENOMEM;
2953 host_raddr = (void *)-1;
2954 } else
2955 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2956 }
2957
2958 if (host_raddr == (void *)-1) {
2959 mmap_unlock();
2960 return get_errno((long)host_raddr);
2961 }
2962 raddr=h2g((unsigned long)host_raddr);
2963
2964 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2965 PAGE_VALID | PAGE_READ |
2966 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2967
2968 for (i = 0; i < N_SHM_REGIONS; i++) {
2969 if (shm_regions[i].start == 0) {
2970 shm_regions[i].start = raddr;
2971 shm_regions[i].size = shm_info.shm_segsz;
2972 break;
2973 }
2974 }
2975
2976 mmap_unlock();
2977 return raddr;
2978
2979 }
2980
2981 static inline abi_long do_shmdt(abi_ulong shmaddr)
2982 {
2983 int i;
2984
2985 for (i = 0; i < N_SHM_REGIONS; ++i) {
2986 if (shm_regions[i].start == shmaddr) {
2987 shm_regions[i].start = 0;
2988 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
2989 break;
2990 }
2991 }
2992
2993 return get_errno(shmdt(g2h(shmaddr)));
2994 }
2995
2996 #ifdef TARGET_NR_ipc
2997 /* ??? This only works with linear mappings. */
2998 /* do_ipc() must return target values and target errnos. */
2999 static abi_long do_ipc(unsigned int call, int first,
3000 int second, int third,
3001 abi_long ptr, abi_long fifth)
3002 {
3003 int version;
3004 abi_long ret = 0;
3005
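/* The ipc(2) multiplexer encodes an ABI version in the upper 16 bits
 * of the call number and the IPCOP_* operation in the lower 16 bits. */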
3006 version = call >> 16;
3007 call &= 0xffff;
3008
3009 switch (call) {
3010 case IPCOP_semop:
3011 ret = do_semop(first, ptr, second);
3012 break;
3013
3014 case IPCOP_semget:
3015 ret = get_errno(semget(first, second, third));
3016 break;
3017
3018 case IPCOP_semctl:
3019 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3020 break;
3021
3022 case IPCOP_msgget:
3023 ret = get_errno(msgget(first, second));
3024 break;
3025
3026 case IPCOP_msgsnd:
3027 ret = do_msgsnd(first, ptr, second, third);
3028 break;
3029
3030 case IPCOP_msgctl:
3031 ret = do_msgctl(first, second, ptr);
3032 break;
3033
3034 case IPCOP_msgrcv:
3035 switch (version) {
3036 case 0:
3037 {
3038 struct target_ipc_kludge {
3039 abi_long msgp;
3040 abi_long msgtyp;
3041 } *tmp;
3042
3043 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3044 ret = -TARGET_EFAULT;
3045 break;
3046 }
3047
3048 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3049
3050 unlock_user_struct(tmp, ptr, 0);
3051 break;
3052 }
3053 default:
3054 ret = do_msgrcv(first, ptr, second, fifth, third);
3055 }
3056 break;
3057
3058 case IPCOP_shmat:
3059 switch (version) {
3060 default:
3061 {
3062 abi_ulong raddr;
3063 raddr = do_shmat(first, ptr, second);
3064 if (is_error(raddr))
3065 return get_errno(raddr);
3066 if (put_user_ual(raddr, third))
3067 return -TARGET_EFAULT;
3068 break;
3069 }
3070 case 1:
3071 ret = -TARGET_EINVAL;
3072 break;
3073 }
3074 break;
3075 case IPCOP_shmdt:
3076 ret = do_shmdt(ptr);
3077 break;
3078
3079 case IPCOP_shmget:
3080 /* IPC_* flag values are the same on all linux platforms */
3081 ret = get_errno(shmget(first, second, third));
3082 break;
3083
3084 /* IPC_* and SHM_* command values are the same on all linux platforms */
3085 case IPCOP_shmctl:
3086 ret = do_shmctl(first, second, third);
3087 break;
3088 default:
3089 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3090 ret = -TARGET_ENOSYS;
3091 break;
3092 }
3093 return ret;
3094 }
3095 #endif
3096
3097 /* kernel structure types definitions */
3098
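/* syscall_types.h is included twice with different STRUCT() macro
 * definitions: the first pass builds an enum of STRUCT_<name> ids,
 * the second emits, for each structure, an argtype array describing
 * its layout for the thunk conversion code. */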
3099 #define STRUCT(name, ...) STRUCT_ ## name,
3100 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3101 enum {
3102 #include "syscall_types.h"
3103 };
3104 #undef STRUCT
3105 #undef STRUCT_SPECIAL
3106
3107 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3108 #define STRUCT_SPECIAL(name)
3109 #include "syscall_types.h"
3110 #undef STRUCT
3111 #undef STRUCT_SPECIAL
3112
3113 typedef struct IOCTLEntry IOCTLEntry;
3114
3115 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3116 int fd, abi_long cmd, abi_long arg);
3117
3118 struct IOCTLEntry {
3119 unsigned int target_cmd;
3120 unsigned int host_cmd;
3121 const char *name;
3122 int access;
3123 do_ioctl_fn *do_ioctl;
3124 const argtype arg_type[5];
3125 };
3126
3127 #define IOC_R 0x0001
3128 #define IOC_W 0x0002
3129 #define IOC_RW (IOC_R | IOC_W)
3130
3131 #define MAX_STRUCT_SIZE 4096
3132
3133 #ifdef CONFIG_FIEMAP
3134 /* So fiemap access checks don't overflow on 32 bit systems.
3135 * This is very slightly smaller than the limit imposed by
3136 * the underlying kernel.
3137 */
3138 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3139 / sizeof(struct fiemap_extent))
3140
3141 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3142 int fd, abi_long cmd, abi_long arg)
3143 {
3144 /* The parameter for this ioctl is a struct fiemap followed
3145 * by an array of struct fiemap_extent whose size is set
3146 * in fiemap->fm_extent_count. The array is filled in by the
3147 * ioctl.
3148 */
3149 int target_size_in, target_size_out;
3150 struct fiemap *fm;
3151 const argtype *arg_type = ie->arg_type;
3152 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3153 void *argptr, *p;
3154 abi_long ret;
3155 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3156 uint32_t outbufsz;
3157 int free_fm = 0;
3158
3159 assert(arg_type[0] == TYPE_PTR);
3160 assert(ie->access == IOC_RW);
3161 arg_type++;
3162 target_size_in = thunk_type_size(arg_type, 0);
3163 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3164 if (!argptr) {
3165 return -TARGET_EFAULT;
3166 }
3167 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3168 unlock_user(argptr, arg, 0);
3169 fm = (struct fiemap *)buf_temp;
3170 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3171 return -TARGET_EINVAL;
3172 }
3173
3174 outbufsz = sizeof (*fm) +
3175 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3176
3177 if (outbufsz > MAX_STRUCT_SIZE) {
3178 /* We can't fit all the extents into the fixed size buffer.
3179 * Allocate one that is large enough and use it instead.
3180 */
3181 fm = malloc(outbufsz);
3182 if (!fm) {
3183 return -TARGET_ENOMEM;
3184 }
3185 memcpy(fm, buf_temp, sizeof(struct fiemap));
3186 free_fm = 1;
3187 }
3188 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3189 if (!is_error(ret)) {
3190 target_size_out = target_size_in;
3191 /* An extent_count of 0 means we were only counting the extents
3192 * so there are no structs to copy
3193 */
3194 if (fm->fm_extent_count != 0) {
3195 target_size_out += fm->fm_mapped_extents * extent_size;
3196 }
3197 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3198 if (!argptr) {
3199 ret = -TARGET_EFAULT;
3200 } else {
3201 /* Convert the struct fiemap */
3202 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3203 if (fm->fm_extent_count != 0) {
3204 p = argptr + target_size_in;
3205 /* ...and then all the struct fiemap_extents */
3206 for (i = 0; i < fm->fm_mapped_extents; i++) {
3207 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3208 THUNK_TARGET);
3209 p += extent_size;
3210 }
3211 }
3212 unlock_user(argptr, arg, target_size_out);
3213 }
3214 }
3215 if (free_fm) {
3216 free(fm);
3217 }
3218 return ret;
3219 }
3220 #endif
3221
3222 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3223 int fd, abi_long cmd, abi_long arg)
3224 {
3225 const argtype *arg_type = ie->arg_type;
3226 int target_size;
3227 void *argptr;
3228 int ret;
3229 struct ifconf *host_ifconf;
3230 uint32_t outbufsz;
3231 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3232 int target_ifreq_size;
3233 int nb_ifreq;
3234 int free_buf = 0;
3235 int i;
3236 int target_ifc_len;
3237 abi_long target_ifc_buf;
3238 int host_ifc_len;
3239 char *host_ifc_buf;
3240
3241 assert(arg_type[0] == TYPE_PTR);
3242 assert(ie->access == IOC_RW);
3243
3244 arg_type++;
3245 target_size = thunk_type_size(arg_type, 0);
3246
3247 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3248 if (!argptr)
3249 return -TARGET_EFAULT;
3250 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3251 unlock_user(argptr, arg, 0);
3252
3253 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3254 target_ifc_len = host_ifconf->ifc_len;
3255 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3256
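/* The target's struct ifreq may differ in size from the host's, so
 * recompute the buffer length in terms of host entries before issuing
 * the ioctl, and convert the length back to target units afterwards. */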
3257 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3258 nb_ifreq = target_ifc_len / target_ifreq_size;
3259 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3260
3261 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3262 if (outbufsz > MAX_STRUCT_SIZE) {
3263 /* We can't fit all the ifreq entries into the fixed size buffer.
3264 * Allocate one that is large enough and use it instead.
3265 */
3266 host_ifconf = malloc(outbufsz);
3267 if (!host_ifconf) {
3268 return -TARGET_ENOMEM;
3269 }
3270 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3271 free_buf = 1;
3272 }
3273 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3274
3275 host_ifconf->ifc_len = host_ifc_len;
3276 host_ifconf->ifc_buf = host_ifc_buf;
3277
3278 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3279 if (!is_error(ret)) {
3280 /* convert host ifc_len to target ifc_len */
3281
3282 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3283 target_ifc_len = nb_ifreq * target_ifreq_size;
3284 host_ifconf->ifc_len = target_ifc_len;
3285
3286 /* restore target ifc_buf */
3287
3288 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3289
3290 /* copy struct ifconf to target user */
3291
3292 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3293 if (!argptr)
3294 return -TARGET_EFAULT;
3295 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3296 unlock_user(argptr, arg, target_size);
3297
3298 /* copy ifreq[] to target user */
3299
3300 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3301 for (i = 0; i < nb_ifreq ; i++) {
3302 thunk_convert(argptr + i * target_ifreq_size,
3303 host_ifc_buf + i * sizeof(struct ifreq),
3304 ifreq_arg_type, THUNK_TARGET);
3305 }
3306 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3307 }
3308
3309 if (free_buf) {
3310 free(host_ifconf);
3311 }
3312
3313 return ret;
3314 }
3315
3316 static IOCTLEntry ioctl_entries[] = {
3317 #define IOCTL(cmd, access, ...) \
3318 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3319 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3320 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3321 #include "ioctls.h"
3322 { 0, 0, },
3323 };
3324
3325 /* ??? Implement proper locking for ioctls. */
3326 /* do_ioctl() Must return target values and target errnos. */
3327 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3328 {
3329 const IOCTLEntry *ie;
3330 const argtype *arg_type;
3331 abi_long ret;
3332 uint8_t buf_temp[MAX_STRUCT_SIZE];
3333 int target_size;
3334 void *argptr;
3335
3336 ie = ioctl_entries;
3337 for(;;) {
3338 if (ie->target_cmd == 0) {
3339 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3340 return -TARGET_ENOSYS;
3341 }
3342 if (ie->target_cmd == cmd)
3343 break;
3344 ie++;
3345 }
3346 arg_type = ie->arg_type;
3347 #if defined(DEBUG)
3348 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3349 #endif
3350 if (ie->do_ioctl) {
3351 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3352 }
3353
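/* Generic path: dispatch on the argument description from ioctls.h.
 * TYPE_NULL means no argument, TYPE_INT/TYPE_PTRVOID pass the value
 * through unchanged, and TYPE_PTR converts the pointed-to structure
 * with the thunk machinery in the direction(s) given by ie->access. */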
3354 switch(arg_type[0]) {
3355 case TYPE_NULL:
3356 /* no argument */
3357 ret = get_errno(ioctl(fd, ie->host_cmd));
3358 break;
3359 case TYPE_PTRVOID:
3360 case TYPE_INT:
3361 /* int argument */
3362 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3363 break;
3364 case TYPE_PTR:
3365 arg_type++;
3366 target_size = thunk_type_size(arg_type, 0);
3367 switch(ie->access) {
3368 case IOC_R:
3369 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3370 if (!is_error(ret)) {
3371 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3372 if (!argptr)
3373 return -TARGET_EFAULT;
3374 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3375 unlock_user(argptr, arg, target_size);
3376 }
3377 break;
3378 case IOC_W:
3379 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3380 if (!argptr)
3381 return -TARGET_EFAULT;
3382 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3383 unlock_user(argptr, arg, 0);
3384 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3385 break;
3386 default:
3387 case IOC_RW:
3388 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3389 if (!argptr)
3390 return -TARGET_EFAULT;
3391 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3392 unlock_user(argptr, arg, 0);
3393 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3394 if (!is_error(ret)) {
3395 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3396 if (!argptr)
3397 return -TARGET_EFAULT;
3398 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3399 unlock_user(argptr, arg, target_size);
3400 }
3401 break;
3402 }
3403 break;
3404 default:
3405 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3406 (long)cmd, arg_type[0]);
3407 ret = -TARGET_ENOSYS;
3408 break;
3409 }
3410 return ret;
3411 }
3412
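/* Each bitmask_transtbl entry is { target_mask, target_bits,
 * host_mask, host_bits }: bits selected by the mask on one side are
 * rewritten to the corresponding bits on the other side by
 * target_to_host_bitmask()/host_to_target_bitmask(). */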
3413 static const bitmask_transtbl iflag_tbl[] = {
3414 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3415 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3416 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3417 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3418 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3419 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3420 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3421 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3422 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3423 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3424 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3425 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3426 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3427 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3428 { 0, 0, 0, 0 }
3429 };
3430
3431 static const bitmask_transtbl oflag_tbl[] = {
3432 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3433 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3434 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3435 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3436 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3437 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3438 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3439 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3440 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3441 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3442 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3443 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3444 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3445 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3446 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3447 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3448 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3449 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3450 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3451 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3452 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3453 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3454 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3455 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3456 { 0, 0, 0, 0 }
3457 };
3458
3459 static const bitmask_transtbl cflag_tbl[] = {
3460 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3461 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3462 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3463 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3464 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3465 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3466 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3467 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3468 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3469 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3470 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3471 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3472 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3473 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3474 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3475 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3476 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3477 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3478 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3479 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3480 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3481 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3482 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3483 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3484 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3485 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3486 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3487 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3488 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3489 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3490 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3491 { 0, 0, 0, 0 }
3492 };
3493
3494 static const bitmask_transtbl lflag_tbl[] = {
3495 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3496 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3497 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3498 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3499 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3500 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3501 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3502 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3503 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3504 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3505 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3506 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3507 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3508 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3509 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3510 { 0, 0, 0, 0 }
3511 };
3512
3513 static void target_to_host_termios (void *dst, const void *src)
3514 {
3515 struct host_termios *host = dst;
3516 const struct target_termios *target = src;
3517
3518 host->c_iflag =
3519 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3520 host->c_oflag =
3521 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3522 host->c_cflag =
3523 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3524 host->c_lflag =
3525 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3526 host->c_line = target->c_line;
3527
3528 memset(host->c_cc, 0, sizeof(host->c_cc));
3529 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3530 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3531 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3532 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3533 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3534 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3535 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3536 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3537 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3538 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3539 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3540 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3541 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3542 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3543 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3544 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3545 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3546 }
3547
3548 static void host_to_target_termios (void *dst, const void *src)
3549 {
3550 struct target_termios *target = dst;
3551 const struct host_termios *host = src;
3552
3553 target->c_iflag =
3554 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3555 target->c_oflag =
3556 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3557 target->c_cflag =
3558 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3559 target->c_lflag =
3560 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3561 target->c_line = host->c_line;
3562
3563 memset(target->c_cc, 0, sizeof(target->c_cc));
3564 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3565 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3566 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3567 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3568 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3569 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3570 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3571 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3572 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3573 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3574 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3575 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3576 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3577 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3578 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3579 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3580 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3581 }
3582
3583 static const StructEntry struct_termios_def = {
3584 .convert = { host_to_target_termios, target_to_host_termios },
3585 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3586 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3587 };
3588
3589 static bitmask_transtbl mmap_flags_tbl[] = {
3590 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3591 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3592 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3593 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3594 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3595 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3596 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3597 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3598 { 0, 0, 0, 0 }
3599 };
3600
3601 #if defined(TARGET_I386)
3602
3603 /* NOTE: there is really only one LDT shared by all the threads */
3604 static uint8_t *ldt_table;
3605
3606 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3607 {
3608 int size;
3609 void *p;
3610
3611 if (!ldt_table)
3612 return 0;
3613 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3614 if (size > bytecount)
3615 size = bytecount;
3616 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3617 if (!p)
3618 return -TARGET_EFAULT;
3619 /* ??? Should this be byteswapped? */
3620 memcpy(p, ldt_table, size);
3621 unlock_user(p, ptr, size);
3622 return size;
3623 }
3624
3625 /* XXX: add locking support */
3626 static abi_long write_ldt(CPUX86State *env,
3627 abi_ulong ptr, unsigned long bytecount, int oldmode)
3628 {
3629 struct target_modify_ldt_ldt_s ldt_info;
3630 struct target_modify_ldt_ldt_s *target_ldt_info;
3631 int seg_32bit, contents, read_exec_only, limit_in_pages;
3632 int seg_not_present, useable, lm;
3633 uint32_t *lp, entry_1, entry_2;
3634
3635 if (bytecount != sizeof(ldt_info))
3636 return -TARGET_EINVAL;
3637 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3638 return -TARGET_EFAULT;
3639 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3640 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3641 ldt_info.limit = tswap32(target_ldt_info->limit);
3642 ldt_info.flags = tswap32(target_ldt_info->flags);
3643 unlock_user_struct(target_ldt_info, ptr, 0);
3644
3645 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3646 return -TARGET_EINVAL;
3647 seg_32bit = ldt_info.flags & 1;
3648 contents = (ldt_info.flags >> 1) & 3;
3649 read_exec_only = (ldt_info.flags >> 3) & 1;
3650 limit_in_pages = (ldt_info.flags >> 4) & 1;
3651 seg_not_present = (ldt_info.flags >> 5) & 1;
3652 useable = (ldt_info.flags >> 6) & 1;
3653 #ifdef TARGET_ABI32
3654 lm = 0;
3655 #else
3656 lm = (ldt_info.flags >> 7) & 1;
3657 #endif
3658 if (contents == 3) {
3659 if (oldmode)
3660 return -TARGET_EINVAL;
3661 if (seg_not_present == 0)
3662 return -TARGET_EINVAL;
3663 }
3664 /* allocate the LDT */
3665 if (!ldt_table) {
3666 env->ldt.base = target_mmap(0,
3667 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3668 PROT_READ|PROT_WRITE,
3669 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3670 if (env->ldt.base == -1)
3671 return -TARGET_ENOMEM;
3672 memset(g2h(env->ldt.base), 0,
3673 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3674 env->ldt.limit = 0xffff;
3675 ldt_table = g2h(env->ldt.base);
3676 }
3677
3678 /* NOTE: same code as Linux kernel */
3679 /* Allow LDTs to be cleared by the user. */
3680 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3681 if (oldmode ||
3682 (contents == 0 &&
3683 read_exec_only == 1 &&
3684 seg_32bit == 0 &&
3685 limit_in_pages == 0 &&
3686 seg_not_present == 1 &&
3687 useable == 0 )) {
3688 entry_1 = 0;
3689 entry_2 = 0;
3690 goto install;
3691 }
3692 }
3693
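/* Pack the base, limit and flag bits into the two 32-bit halves of an
 * x86 segment descriptor, mirroring the kernel's LDT code (see the
 * NOTE above): entry_1 holds base[15:0] and limit[15:0], entry_2 the
 * remaining base/limit bits plus the type, present and size flags. */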
3694 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3695 (ldt_info.limit & 0x0ffff);
3696 entry_2 = (ldt_info.base_addr & 0xff000000) |
3697 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3698 (ldt_info.limit & 0xf0000) |
3699 ((read_exec_only ^ 1) << 9) |
3700 (contents << 10) |
3701 ((seg_not_present ^ 1) << 15) |
3702 (seg_32bit << 22) |
3703 (limit_in_pages << 23) |
3704 (lm << 21) |
3705 0x7000;
3706 if (!oldmode)
3707 entry_2 |= (useable << 20);
3708
3709 /* Install the new entry ... */
3710 install:
3711 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3712 lp[0] = tswap32(entry_1);
3713 lp[1] = tswap32(entry_2);
3714 return 0;
3715 }
3716
3717 /* specific and weird i386 syscalls */
3718 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3719 unsigned long bytecount)
3720 {
3721 abi_long ret;
3722
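/* func follows modify_ldt(2): 0 reads the LDT, 1 writes an entry in
 * the legacy format, and 0x11 writes an entry in the current format
 * (which honors the "useable" bit). */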
3723 switch (func) {
3724 case 0:
3725 ret = read_ldt(ptr, bytecount);
3726 break;
3727 case 1:
3728 ret = write_ldt(env, ptr, bytecount, 1);
3729 break;
3730 case 0x11:
3731 ret = write_ldt(env, ptr, bytecount, 0);
3732 break;
3733 default:
3734 ret = -TARGET_ENOSYS;
3735 break;
3736 }
3737 return ret;
3738 }
3739
3740 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3741 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3742 {
3743 uint64_t *gdt_table = g2h(env->gdt.base);
3744 struct target_modify_ldt_ldt_s ldt_info;
3745 struct target_modify_ldt_ldt_s *target_ldt_info;
3746 int seg_32bit, contents, read_exec_only, limit_in_pages;
3747 int seg_not_present, useable, lm;
3748 uint32_t *lp, entry_1, entry_2;
3749 int i;
3750
3751 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3752 if (!target_ldt_info)
3753 return -TARGET_EFAULT;
3754 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3755 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3756 ldt_info.limit = tswap32(target_ldt_info->limit);
3757 ldt_info.flags = tswap32(target_ldt_info->flags);
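/* Following set_thread_area(2) semantics, an entry_number of -1 asks
   us to pick a free GDT TLS slot; the search below finds one and
   writes the chosen index back to the guest structure. */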
3758 if (ldt_info.entry_number == -1) {
3759 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3760 if (gdt_table[i] == 0) {
3761 ldt_info.entry_number = i;
3762 target_ldt_info->entry_number = tswap32(i);
3763 break;
3764 }
3765 }
3766 }
3767 unlock_user_struct(target_ldt_info, ptr, 1);
3768
3769 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3770 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3771 return -TARGET_EINVAL;
3772 seg_32bit = ldt_info.flags & 1;
3773 contents = (ldt_info.flags >> 1) & 3;
3774 read_exec_only = (ldt_info.flags >> 3) & 1;
3775 limit_in_pages = (ldt_info.flags >> 4) & 1;
3776 seg_not_present = (ldt_info.flags >> 5) & 1;
3777 useable = (ldt_info.flags >> 6) & 1;
3778 #ifdef TARGET_ABI32
3779 lm = 0;
3780 #else
3781 lm = (ldt_info.flags >> 7) & 1;
3782 #endif
3783
3784 if (contents == 3) {
3785 if (seg_not_present == 0)
3786 return -TARGET_EINVAL;
3787 }
3788
3789 /* NOTE: same code as Linux kernel */
3790 /* Allow LDTs to be cleared by the user. */
3791 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3792 if ((contents == 0 &&
3793 read_exec_only == 1 &&
3794 seg_32bit == 0 &&
3795 limit_in_pages == 0 &&
3796 seg_not_present == 1 &&
3797 useable == 0 )) {
3798 entry_1 = 0;
3799 entry_2 = 0;
3800 goto install;
3801 }
3802 }
3803
3804 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3805 (ldt_info.limit & 0x0ffff);
3806 entry_2 = (ldt_info.base_addr & 0xff000000) |
3807 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3808 (ldt_info.limit & 0xf0000) |
3809 ((read_exec_only ^ 1) << 9) |
3810 (contents << 10) |
3811 ((seg_not_present ^ 1) << 15) |
3812 (seg_32bit << 22) |
3813 (limit_in_pages << 23) |
3814 (useable << 20) |
3815 (lm << 21) |
3816 0x7000;
3817
3818 /* Install the new entry ... */
3819 install:
3820 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3821 lp[0] = tswap32(entry_1);
3822 lp[1] = tswap32(entry_2);
3823 return 0;
3824 }
3825
3826 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3827 {
3828 struct target_modify_ldt_ldt_s *target_ldt_info;
3829 uint64_t *gdt_table = g2h(env->gdt.base);
3830 uint32_t base_addr, limit, flags;
3831 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3832 int seg_not_present, useable, lm;
3833 uint32_t *lp, entry_1, entry_2;
3834
3835 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3836 if (!target_ldt_info)
3837 return -TARGET_EFAULT;
3838 idx = tswap32(target_ldt_info->entry_number);
3839 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3840 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3841 unlock_user_struct(target_ldt_info, ptr, 1);
3842 return -TARGET_EINVAL;
3843 }
3844 lp = (uint32_t *)(gdt_table + idx);
3845 entry_1 = tswap32(lp[0]);
3846 entry_2 = tswap32(lp[1]);
3847
3848 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3849 contents = (entry_2 >> 10) & 3;
3850 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3851 seg_32bit = (entry_2 >> 22) & 1;
3852 limit_in_pages = (entry_2 >> 23) & 1;
3853 useable = (entry_2 >> 20) & 1;
3854 #ifdef TARGET_ABI32
3855 lm = 0;
3856 #else
3857 lm = (entry_2 >> 21) & 1;
3858 #endif
3859 flags = (seg_32bit << 0) | (contents << 1) |
3860 (read_exec_only << 3) | (limit_in_pages << 4) |
3861 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3862 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3863 base_addr = (entry_1 >> 16) |
3864 (entry_2 & 0xff000000) |
3865 ((entry_2 & 0xff) << 16);
3866 target_ldt_info->base_addr = tswapl(base_addr);
3867 target_ldt_info->limit = tswap32(limit);
3868 target_ldt_info->flags = tswap32(flags);
3869 unlock_user_struct(target_ldt_info, ptr, 1);
3870 return 0;
3871 }
3872 #endif /* TARGET_I386 && TARGET_ABI32 */
3873
3874 #ifndef TARGET_ABI32
3875 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3876 {
3877 abi_long ret = 0;
3878 abi_ulong val;
3879 int idx;
3880
3881 switch(code) {
3882 case TARGET_ARCH_SET_GS:
3883 case TARGET_ARCH_SET_FS:
3884 if (code == TARGET_ARCH_SET_GS)
3885 idx = R_GS;
3886 else
3887 idx = R_FS;
3888 cpu_x86_load_seg(env, idx, 0);
3889 env->segs[idx].base = addr;
3890 break;
3891 case TARGET_ARCH_GET_GS:
3892 case TARGET_ARCH_GET_FS:
3893 if (code == TARGET_ARCH_GET_GS)
3894 idx = R_GS;
3895 else
3896 idx = R_FS;
3897 val = env->segs[idx].base;
3898 if (put_user(val, addr, abi_ulong))
3899 ret = -TARGET_EFAULT;
3900 break;
3901 default:
3902 ret = -TARGET_EINVAL;
3903 break;
3904 }
3905 return ret;
3906 }
3907 #endif
3908
3909 #endif /* defined(TARGET_I386) */
3910
3911 #define NEW_STACK_SIZE 0x40000
3912
3913 #if defined(CONFIG_USE_NPTL)
3914
3915 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3916 typedef struct {
3917 CPUState *env;
3918 pthread_mutex_t mutex;
3919 pthread_cond_t cond;
3920 pthread_t thread;
3921 uint32_t tid;
3922 abi_ulong child_tidptr;
3923 abi_ulong parent_tidptr;
3924 sigset_t sigmask;
3925 } new_thread_info;
3926
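/* Child side of an NPTL clone: publish our TID, wake the parent via
   info->cond, then block on clone_lock until the parent has finished
   the TLS setup before entering the guest cpu_loop(). */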
3927 static void *clone_func(void *arg)
3928 {
3929 new_thread_info *info = arg;
3930 CPUState *env;
3931 TaskState *ts;
3932
3933 env = info->env;
3934 thread_env = env;
3935 ts = (TaskState *)thread_env->opaque;
3936 info->tid = gettid();
3937 env->host_tid = info->tid;
3938 task_settid(ts);
3939 if (info->child_tidptr)
3940 put_user_u32(info->tid, info->child_tidptr);
3941 if (info->parent_tidptr)
3942 put_user_u32(info->tid, info->parent_tidptr);
3943 /* Enable signals. */
3944 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3945 /* Signal to the parent that we're ready. */
3946 pthread_mutex_lock(&info->mutex);
3947 pthread_cond_broadcast(&info->cond);
3948 pthread_mutex_unlock(&info->mutex);
3949 /* Wait until the parent has finished initializing the TLS state. */
3950 pthread_mutex_lock(&clone_lock);
3951 pthread_mutex_unlock(&clone_lock);
3952 cpu_loop(env);
3953 /* never exits */
3954 return NULL;
3955 }
3956 #else
3957
3958 static int clone_func(void *arg)
3959 {
3960 CPUState *env = arg;
3961 cpu_loop(env);
3962 /* never exits */
3963 return 0;
3964 }
3965 #endif
3966
3967 /* do_fork() must return host values and target errnos (unlike most
3968 do_*() functions). */
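/* Three cases are handled below: CLONE_VM with NPTL support uses a
   host pthread, CLONE_VM without NPTL falls back to a raw clone() on
   a freshly allocated stack, and anything else is emulated with a
   plain fork(). */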
3969 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3970 abi_ulong parent_tidptr, target_ulong newtls,
3971 abi_ulong child_tidptr)
3972 {
3973 int ret;
3974 TaskState *ts;
3975 CPUState *new_env;
3976 #if defined(CONFIG_USE_NPTL)
3977 unsigned int nptl_flags;
3978 sigset_t sigmask;
3979 #else
3980 uint8_t *new_stack;
3981 #endif
3982
3983 /* Emulate vfork() with fork() */
3984 if (flags & CLONE_VFORK)
3985 flags &= ~(CLONE_VFORK | CLONE_VM);
3986
3987 if (flags & CLONE_VM) {
3988 TaskState *parent_ts = (TaskState *)env->opaque;
3989 #if defined(CONFIG_USE_NPTL)
3990 new_thread_info info;
3991 pthread_attr_t attr;
3992 #endif
3993 ts = g_malloc0(sizeof(TaskState));
3994 init_task_state(ts);
3995 /* we create a new CPU instance. */
3996 new_env = cpu_copy(env);
3997 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3998 cpu_reset(new_env);
3999 #endif
4000 /* Init regs that differ from the parent. */
4001 cpu_clone_regs(new_env, newsp);
4002 new_env->opaque = ts;
4003 ts->bprm = parent_ts->bprm;
4004 ts->info = parent_ts->info;
4005 #if defined(CONFIG_USE_NPTL)
4006 nptl_flags = flags;
4007 flags &= ~CLONE_NPTL_FLAGS2;
4008
4009 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4010 ts->child_tidptr = child_tidptr;
4011 }
4012
4013 if (nptl_flags & CLONE_SETTLS)
4014 cpu_set_tls (new_env, newtls);
4015
4016 /* Grab a mutex so that thread setup appears atomic. */
4017 pthread_mutex_lock(&clone_lock);
4018
4019 memset(&info, 0, sizeof(info));
4020 pthread_mutex_init(&info.mutex, NULL);
4021 pthread_mutex_lock(&info.mutex);
4022 pthread_cond_init(&info.cond, NULL);
4023 info.env = new_env;
4024 if (nptl_flags & CLONE_CHILD_SETTID)
4025 info.child_tidptr = child_tidptr;
4026 if (nptl_flags & CLONE_PARENT_SETTID)
4027 info.parent_tidptr = parent_tidptr;
4028
4029 ret = pthread_attr_init(&attr);
4030 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4031 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4032 /* It is not safe to deliver signals until the child has finished
4033 initializing, so temporarily block all signals. */
4034 sigfillset(&sigmask);
4035 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4036
4037 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4038 /* TODO: Free new CPU state if thread creation failed. */
4039
4040 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4041 pthread_attr_destroy(&attr);
4042 if (ret == 0) {
4043 /* Wait for the child to initialize. */
4044 pthread_cond_wait(&info.cond, &info.mutex);
4045 ret = info.tid;
4046 if (flags & CLONE_PARENT_SETTID)
4047 put_user_u32(ret, parent_tidptr);
4048 } else {
4049 ret = -1;
4050 }
4051 pthread_mutex_unlock(&info.mutex);
4052 pthread_cond_destroy(&info.cond);
4053 pthread_mutex_destroy(&info.mutex);
4054 pthread_mutex_unlock(&clone_lock);
4055 #else
4056 if (flags & CLONE_NPTL_FLAGS2)
4057 return -EINVAL;
4058 /* This is probably going to die very quickly, but do it anyway. */
4059 new_stack = g_malloc0 (NEW_STACK_SIZE);
4060 #ifdef __ia64__
4061 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4062 #else
4063 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4064 #endif
4065 #endif
4066 } else {
4067 /* if CLONE_VM is not set, we consider it to be a fork */
4068 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4069 return -EINVAL;
4070 fork_start();
4071 ret = fork();
4072 if (ret == 0) {
4073 /* Child Process. */
4074 cpu_clone_regs(env, newsp);
4075 fork_end(1);
4076 #if defined(CONFIG_USE_NPTL)
4077 /* There is a race condition here. The parent process could
4078 theoretically read the TID in the child process before the child
4079 tid is set. Fixing this would require either using ptrace
4080 (not implemented) or having *_tidptr point at a shared memory
4081 mapping. We can't repeat the spinlock hack used above because
4082 the child process gets its own copy of the lock. */
4083 if (flags & CLONE_CHILD_SETTID)
4084 put_user_u32(gettid(), child_tidptr);
4085 if (flags & CLONE_PARENT_SETTID)
4086 put_user_u32(gettid(), parent_tidptr);
4087 ts = (TaskState *)env->opaque;
4088 if (flags & CLONE_SETTLS)
4089 cpu_set_tls (env, newtls);
4090 if (flags & CLONE_CHILD_CLEARTID)
4091 ts->child_tidptr = child_tidptr;
4092 #endif
4093 } else {
4094 fork_end(0);
4095 }
4096 }
4097 return ret;
4098 }
4099
4100 /* warning: doesn't handle Linux-specific flags... */
4101 static int target_to_host_fcntl_cmd(int cmd)
4102 {
4103 switch(cmd) {
4104 case TARGET_F_DUPFD:
4105 case TARGET_F_GETFD:
4106 case TARGET_F_SETFD:
4107 case TARGET_F_GETFL:
4108 case TARGET_F_SETFL:
4109 return cmd;
4110 case TARGET_F_GETLK:
4111 return F_GETLK;
4112 case TARGET_F_SETLK:
4113 return F_SETLK;
4114 case TARGET_F_SETLKW:
4115 return F_SETLKW;
4116 case TARGET_F_GETOWN:
4117 return F_GETOWN;
4118 case TARGET_F_SETOWN:
4119 return F_SETOWN;
4120 case TARGET_F_GETSIG:
4121 return F_GETSIG;
4122 case TARGET_F_SETSIG:
4123 return F_SETSIG;
4124 #if TARGET_ABI_BITS == 32
4125 case TARGET_F_GETLK64:
4126 return F_GETLK64;
4127 case TARGET_F_SETLK64:
4128 return F_SETLK64;
4129 case TARGET_F_SETLKW64:
4130 return F_SETLKW64;
4131 #endif
4132 case TARGET_F_SETLEASE:
4133 return F_SETLEASE;
4134 case TARGET_F_GETLEASE:
4135 return F_GETLEASE;
4136 #ifdef F_DUPFD_CLOEXEC
4137 case TARGET_F_DUPFD_CLOEXEC:
4138 return F_DUPFD_CLOEXEC;
4139 #endif
4140 case TARGET_F_NOTIFY:
4141 return F_NOTIFY;
4142 default:
4143 return -TARGET_EINVAL;
4144 }
4145 return -TARGET_EINVAL;
4146 }
4147
4148 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4149 {
4150 struct flock fl;
4151 struct target_flock *target_fl;
4152 struct flock64 fl64;
4153 struct target_flock64 *target_fl64;
4154 abi_long ret;
4155 int host_cmd = target_to_host_fcntl_cmd(cmd);
4156
4157 if (host_cmd == -TARGET_EINVAL)
4158 return host_cmd;
4159
4160 switch(cmd) {
4161 case TARGET_F_GETLK:
4162 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4163 return -TARGET_EFAULT;
4164 fl.l_type = tswap16(target_fl->l_type);
4165 fl.l_whence = tswap16(target_fl->l_whence);
4166 fl.l_start = tswapl(target_fl->l_start);
4167 fl.l_len = tswapl(target_fl->l_len);
4168 fl.l_pid = tswap32(target_fl->l_pid);
4169 unlock_user_struct(target_fl, arg, 0);
4170 ret = get_errno(fcntl(fd, host_cmd, &fl));
4171 if (ret == 0) {
4172 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4173 return -TARGET_EFAULT;
4174 target_fl->l_type = tswap16(fl.l_type);
4175 target_fl->l_whence = tswap16(fl.l_whence);
4176 target_fl->l_start = tswapl(fl.l_start);
4177 target_fl->l_len = tswapl(fl.l_len);
4178 target_fl->l_pid = tswap32(fl.l_pid);
4179 unlock_user_struct(target_fl, arg, 1);
4180 }
4181 break;
4182
4183 case TARGET_F_SETLK:
4184 case TARGET_F_SETLKW:
4185 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4186 return -TARGET_EFAULT;
4187 fl.l_type = tswap16(target_fl->l_type);
4188 fl.l_whence = tswap16(target_fl->l_whence);
4189 fl.l_start = tswapl(target_fl->l_start);
4190 fl.l_len = tswapl(target_fl->l_len);
4191 fl.l_pid = tswap32(target_fl->l_pid);
4192 unlock_user_struct(target_fl, arg, 0);
4193 ret = get_errno(fcntl(fd, host_cmd, &fl));
4194 break;
4195
4196 case TARGET_F_GETLK64:
4197 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4198 return -TARGET_EFAULT;
4199 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4200 fl64.l_whence = tswap16(target_fl64->l_whence);
4201 fl64.l_start = tswapl(target_fl64->l_start);
4202 fl64.l_len = tswapl(target_fl64->l_len);
4203 fl64.l_pid = tswap32(target_fl64->l_pid);
4204 unlock_user_struct(target_fl64, arg, 0);
4205 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4206 if (ret == 0) {
4207 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4208 return -TARGET_EFAULT;
4209 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4210 target_fl64->l_whence = tswap16(fl64.l_whence);
4211 target_fl64->l_start = tswapl(fl64.l_start);
4212 target_fl64->l_len = tswapl(fl64.l_len);
4213 target_fl64->l_pid = tswap32(fl64.l_pid);
4214 unlock_user_struct(target_fl64, arg, 1);
4215 }
4216 break;
4217 case TARGET_F_SETLK64:
4218 case TARGET_F_SETLKW64:
4219 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4220 return -TARGET_EFAULT;
4221 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4222 fl64.l_whence = tswap16(target_fl64->l_whence);
4223 fl64.l_start = tswapl(target_fl64->l_start);
4224 fl64.l_len = tswapl(target_fl64->l_len);
4225 fl64.l_pid = tswap32(target_fl64->l_pid);
4226 unlock_user_struct(target_fl64, arg, 0);
4227 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4228 break;
4229
4230 case TARGET_F_GETFL:
4231 ret = get_errno(fcntl(fd, host_cmd, arg));
4232 if (ret >= 0) {
4233 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4234 }
4235 break;
4236
4237 case TARGET_F_SETFL:
4238 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4239 break;
4240
4241 case TARGET_F_SETOWN:
4242 case TARGET_F_GETOWN:
4243 case TARGET_F_SETSIG:
4244 case TARGET_F_GETSIG:
4245 case TARGET_F_SETLEASE:
4246 case TARGET_F_GETLEASE:
4247 ret = get_errno(fcntl(fd, host_cmd, arg));
4248 break;
4249
4250 default:
4251 ret = get_errno(fcntl(fd, cmd, arg));
4252 break;
4253 }
4254 return ret;
4255 }
4256
4257 #ifdef USE_UID16
4258
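/* Helpers for targets whose legacy syscalls carry 16-bit uids/gids:
   host ids above 65535 are clamped to 65534 (the conventional
   overflow id), and a 16-bit -1 is preserved so that "unchanged"
   arguments such as chown(path, -1, gid) keep their meaning. */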
4259 static inline int high2lowuid(int uid)
4260 {
4261 if (uid > 65535)
4262 return 65534;
4263 else
4264 return uid;
4265 }
4266
4267 static inline int high2lowgid(int gid)
4268 {
4269 if (gid > 65535)
4270 return 65534;
4271 else
4272 return gid;
4273 }
4274
4275 static inline int low2highuid(int uid)
4276 {
4277 if ((int16_t)uid == -1)
4278 return -1;
4279 else
4280 return uid;
4281 }
4282
4283 static inline int low2highgid(int gid)
4284 {
4285 if ((int16_t)gid == -1)
4286 return -1;
4287 else
4288 return gid;
4289 }
4290 static inline int tswapid(int id)
4291 {
4292 return tswap16(id);
4293 }
4294 #else /* !USE_UID16 */
4295 static inline int high2lowuid(int uid)
4296 {
4297 return uid;
4298 }
4299 static inline int high2lowgid(int gid)
4300 {
4301 return gid;
4302 }
4303 static inline int low2highuid(int uid)
4304 {
4305 return uid;
4306 }
4307 static inline int low2highgid(int gid)
4308 {
4309 return gid;
4310 }
4311 static inline int tswapid(int id)
4312 {
4313 return tswap32(id);
4314 }
4315 #endif /* USE_UID16 */
4316
4317 void syscall_init(void)
4318 {
4319 IOCTLEntry *ie;
4320 const argtype *arg_type;
4321 int size;
4322 int i;
4323
4324 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4325 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4326 #include "syscall_types.h"
4327 #undef STRUCT
4328 #undef STRUCT_SPECIAL
4329
4330 /* We patch the ioctl size if necessary. We rely on the fact that
4331 no ioctl has all bits set to '1' in its size field. */
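/* For example, an entry whose size field is all ones (TARGET_IOC_SIZEMASK)
   gets that field replaced by thunk_type_size() of the pointed-to type,
   so the patched target_cmd encodes the real argument size. */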
4332 ie = ioctl_entries;
4333 while (ie->target_cmd != 0) {
4334 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4335 TARGET_IOC_SIZEMASK) {
4336 arg_type = ie->arg_type;
4337 if (arg_type[0] != TYPE_PTR) {
4338 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4339 ie->target_cmd);
4340 exit(1);
4341 }
4342 arg_type++;
4343 size = thunk_type_size(arg_type, 0);
4344 ie->target_cmd = (ie->target_cmd &
4345 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4346 (size << TARGET_IOC_SIZESHIFT);
4347 }
4348
4349 /* Build target_to_host_errno_table[] from
4350 * host_to_target_errno_table[]. */
4351 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4352 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4353
4354 /* automatic consistency check if same arch */
4355 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4356 (defined(__x86_64__) && defined(TARGET_X86_64))
4357 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4358 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4359 ie->name, ie->target_cmd, ie->host_cmd);
4360 }
4361 #endif
4362 ie++;
4363 }
4364 }
4365
4366 #if TARGET_ABI_BITS == 32
4367 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4368 {
4369 #ifdef TARGET_WORDS_BIGENDIAN
4370 return ((uint64_t)word0 << 32) | word1;
4371 #else
4372 return ((uint64_t)word1 << 32) | word0;
4373 #endif
4374 }
4375 #else /* TARGET_ABI_BITS == 32 */
4376 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4377 {
4378 return word0;
4379 }
4380 #endif /* TARGET_ABI_BITS != 32 */
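/* Illustration: a 32-bit guest passing the 64-bit offset 0x100000000
   supplies it as two register-sized halves; on a big-endian target
   word0 holds the high half (0x1) and on a little-endian target word1
   does, which is exactly what the two variants above reconstruct. */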
4381
4382 #ifdef TARGET_NR_truncate64
4383 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4384 abi_long arg2,
4385 abi_long arg3,
4386 abi_long arg4)
4387 {
4388 if (regpairs_aligned(cpu_env)) {
4389 arg2 = arg3;
4390 arg3 = arg4;
4391 }
4392 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4393 }
4394 #endif
4395
4396 #ifdef TARGET_NR_ftruncate64
4397 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4398 abi_long arg2,
4399 abi_long arg3,
4400 abi_long arg4)
4401 {
4402 if (regpairs_aligned(cpu_env)) {
4403 arg2 = arg3;
4404 arg3 = arg4;
4405 }
4406 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4407 }
4408 #endif
4409
4410 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4411 abi_ulong target_addr)
4412 {
4413 struct target_timespec *target_ts;
4414
4415 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4416 return -TARGET_EFAULT;
4417 host_ts->tv_sec = tswapl(target_ts->tv_sec);
4418 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
4419 unlock_user_struct(target_ts, target_addr, 0);
4420 return 0;
4421 }
4422
4423 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4424 struct timespec *host_ts)
4425 {
4426 struct target_timespec *target_ts;
4427
4428 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4429 return -TARGET_EFAULT;
4430 target_ts->tv_sec = tswapl(host_ts->tv_sec);
4431 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
4432 unlock_user_struct(target_ts, target_addr, 1);
4433 return 0;
4434 }
4435
4436 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4437 static inline abi_long host_to_target_stat64(void *cpu_env,
4438 abi_ulong target_addr,
4439 struct stat *host_st)
4440 {
4441 #ifdef TARGET_ARM
4442 if (((CPUARMState *)cpu_env)->eabi) {
4443 struct target_eabi_stat64 *target_st;
4444
4445 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4446 return -TARGET_EFAULT;
4447 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4448 __put_user(host_st->st_dev, &target_st->st_dev);
4449 __put_user(host_st->st_ino, &target_st->st_ino);
4450 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4451 __put_user(host_st->st_ino, &target_st->__st_ino);
4452 #endif
4453 __put_user(host_st->st_mode, &target_st->st_mode);
4454 __put_user(host_st->st_nlink, &target_st->st_nlink);
4455 __put_user(host_st->st_uid, &target_st->st_uid);
4456 __put_user(host_st->st_gid, &target_st->st_gid);
4457 __put_user(host_st->st_rdev, &target_st->st_rdev);
4458 __put_user(host_st->st_size, &target_st->st_size);
4459 __put_user(host_st->st_blksize, &target_st->st_blksize);
4460 __put_user(host_st->st_blocks, &target_st->st_blocks);
4461 __put_user(host_st->st_atime, &target_st->target_st_atime);
4462 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4463 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4464 unlock_user_struct(target_st, target_addr, 1);
4465 } else
4466 #endif
4467 {
4468 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4469 struct target_stat *target_st;
4470 #else
4471 struct target_stat64 *target_st;
4472 #endif
4473
4474 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4475 return -TARGET_EFAULT;
4476 memset(target_st, 0, sizeof(*target_st));
4477 __put_user(host_st->st_dev, &target_st->st_dev);
4478 __put_user(host_st->st_ino, &target_st->st_ino);
4479 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4480 __put_user(host_st->st_ino, &target_st->__st_ino);
4481 #endif
4482 __put_user(host_st->st_mode, &target_st->st_mode);
4483 __put_user(host_st->st_nlink, &target_st->st_nlink);
4484 __put_user(host_st->st_uid, &target_st->st_uid);
4485 __put_user(host_st->st_gid, &target_st->st_gid);
4486 __put_user(host_st->st_rdev, &target_st->st_rdev);
4487 /* XXX: better use of kernel struct */
4488 __put_user(host_st->st_size, &target_st->st_size);
4489 __put_user(host_st->st_blksize, &target_st->st_blksize);
4490 __put_user(host_st->st_blocks, &target_st->st_blocks);
4491 __put_user(host_st->st_atime, &target_st->target_st_atime);
4492 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4493 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4494 unlock_user_struct(target_st, target_addr, 1);
4495 }
4496
4497 return 0;
4498 }
4499 #endif
4500
4501 #if defined(CONFIG_USE_NPTL)
4502 /* ??? Using host futex calls even when target atomic operations
4503 are not really atomic probably breaks things. However, implementing
4504 futexes locally would make futexes shared between multiple processes
4505 tricky. They are probably useless in that case anyway, because guest
4506 atomic operations won't work either. */
4507 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4508 target_ulong uaddr2, int val3)
4509 {
4510 struct timespec ts, *pts;
4511 int base_op;
4512
4513 /* ??? We assume FUTEX_* constants are the same on both host
4514 and target. */
4515 #ifdef FUTEX_CMD_MASK
4516 base_op = op & FUTEX_CMD_MASK;
4517 #else
4518 base_op = op;
4519 #endif
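/* base_op is the command with modifier bits such as FUTEX_PRIVATE_FLAG
   masked off (when the host defines FUTEX_CMD_MASK); note that the
   unmasked op is still what gets forwarded to the host futex call. */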
4520 switch (base_op) {
4521 case FUTEX_WAIT:
4522 if (timeout) {
4523 pts = &ts;
4524 target_to_host_timespec(pts, timeout);
4525 } else {
4526 pts = NULL;
4527 }
4528 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4529 pts, NULL, 0));
4530 case FUTEX_WAKE:
4531 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4532 case FUTEX_FD:
4533 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4534 case FUTEX_REQUEUE:
4535 case FUTEX_CMP_REQUEUE:
4536 case FUTEX_WAKE_OP:
4537 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4538 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4539 But the prototype takes a `struct timespec *'; insert casts
4540 to satisfy the compiler. We do not need to tswap TIMEOUT
4541 since it's not compared to guest memory. */
4542 pts = (struct timespec *)(uintptr_t) timeout;
4543 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4544 g2h(uaddr2),
4545 (base_op == FUTEX_CMP_REQUEUE
4546 ? tswap32(val3)
4547 : val3)));
4548 default:
4549 return -TARGET_ENOSYS;
4550 }
4551 }
4552 #endif
4553
4554 /* Map host to target signal numbers for the wait family of syscalls.
4555 Assume all other status bits are the same. */
4556 static int host_to_target_waitstatus(int status)
4557 {
4558 if (WIFSIGNALED(status)) {
4559 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4560 }
4561 if (WIFSTOPPED(status)) {
4562 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4563 | (status & 0xff);
4564 }
4565 return status;
4566 }
4567
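/* Derive a packed KERNEL_VERSION-style number from the kernel release
   string, preferring a user-supplied qemu_uname_release when present.
   For example, "2.6.32-5-amd64" yields (2 << 16) | (6 << 8) | 32. */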
4568 int get_osversion(void)
4569 {
4570 static int osversion;
4571 struct new_utsname buf;
4572 const char *s;
4573 int i, n, tmp;
4574 if (osversion)
4575 return osversion;
4576 if (qemu_uname_release && *qemu_uname_release) {
4577 s = qemu_uname_release;
4578 } else {
4579 if (sys_uname(&buf))
4580 return 0;
4581 s = buf.release;
4582 }
4583 tmp = 0;
4584 for (i = 0; i < 3; i++) {
4585 n = 0;
4586 while (*s >= '0' && *s <= '9') {
4587 n *= 10;
4588 n += *s - '0';
4589 s++;
4590 }
4591 tmp = (tmp << 8) + n;
4592 if (*s == '.')
4593 s++;
4594 }
4595 osversion = tmp;
4596 return osversion;
4597 }
4598
4599 /* do_syscall() should always have a single exit point at the end so
4600 that actions, such as logging of syscall results, can be performed.
4601 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4602 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4603 abi_long arg2, abi_long arg3, abi_long arg4,
4604 abi_long arg5, abi_long arg6, abi_long arg7,
4605 abi_long arg8)
4606 {
4607 abi_long ret;
4608 struct stat st;
4609 struct statfs stfs;
4610 void *p;
4611
4612 #ifdef DEBUG
4613 gemu_log("syscall %d", num);
4614 #endif
4615 if(do_strace)
4616 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4617
4618 switch(num) {
4619 case TARGET_NR_exit:
4620 #ifdef CONFIG_USE_NPTL
4621 /* In old applications this may be used to implement _exit(2).
4622 However, in threaded applications it is used for thread termination,
4623 and _exit_group is used for application termination.
4624 Do thread termination if we have more than one thread. */
4625 /* FIXME: This probably breaks if a signal arrives. We should probably
4626 be disabling signals. */
4627 if (first_cpu->next_cpu) {
4628 TaskState *ts;
4629 CPUState **lastp;
4630 CPUState *p;
4631
4632 cpu_list_lock();
4633 lastp = &first_cpu;
4634 p = first_cpu;
4635 while (p && p != (CPUState *)cpu_env) {
4636 lastp = &p->next_cpu;
4637 p = p->next_cpu;
4638 }
4639 /* If we didn't find the CPU for this thread then something is
4640 horribly wrong. */
4641 if (!p)
4642 abort();
4643 /* Remove the CPU from the list. */
4644 *lastp = p->next_cpu;
4645 cpu_list_unlock();
4646 ts = ((CPUState *)cpu_env)->opaque;
4647 if (ts->child_tidptr) {
4648 put_user_u32(0, ts->child_tidptr);
4649 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4650 NULL, NULL, 0);
4651 }
4652 thread_env = NULL;
4653 g_free(cpu_env);
4654 g_free(ts);
4655 pthread_exit(NULL);
4656 }
4657 #endif
4658 #ifdef TARGET_GPROF
4659 _mcleanup();
4660 #endif
4661 gdb_exit(cpu_env, arg1);
4662 _exit(arg1);
4663 ret = 0; /* avoid warning */
4664 break;
4665 case TARGET_NR_read:
4666 if (arg3 == 0)
4667 ret = 0;
4668 else {
4669 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4670 goto efault;
4671 ret = get_errno(read(arg1, p, arg3));
4672 unlock_user(p, arg2, ret);
4673 }
4674 break;
4675 case TARGET_NR_write:
4676 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4677 goto efault;
4678 ret = get_errno(write(arg1, p, arg3));
4679 unlock_user(p, arg2, 0);
4680 break;
4681 case TARGET_NR_open:
4682 if (!(p = lock_user_string(arg1)))
4683 goto efault;
4684 ret = get_errno(open(path(p),
4685 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4686 arg3));
4687 unlock_user(p, arg1, 0);
4688 break;
4689 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4690 case TARGET_NR_openat:
4691 if (!(p = lock_user_string(arg2)))
4692 goto efault;
4693 ret = get_errno(sys_openat(arg1,
4694 path(p),
4695 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4696 arg4));
4697 unlock_user(p, arg2, 0);
4698 break;
4699 #endif
4700 case TARGET_NR_close:
4701 ret = get_errno(close(arg1));
4702 break;
4703 case TARGET_NR_brk:
4704 ret = do_brk(arg1);
4705 break;
4706 case TARGET_NR_fork:
4707 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4708 break;
4709 #ifdef TARGET_NR_waitpid
4710 case TARGET_NR_waitpid:
4711 {
4712 int status;
4713 ret = get_errno(waitpid(arg1, &status, arg3));
4714 if (!is_error(ret) && arg2
4715 && put_user_s32(host_to_target_waitstatus(status), arg2))
4716 goto efault;
4717 }
4718 break;
4719 #endif
4720 #ifdef TARGET_NR_waitid
4721 case TARGET_NR_waitid:
4722 {
4723 siginfo_t info;
4724 info.si_pid = 0;
4725 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4726 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4727 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4728 goto efault;
4729 host_to_target_siginfo(p, &info);
4730 unlock_user(p, arg3, sizeof(target_siginfo_t));
4731 }
4732 }
4733 break;
4734 #endif
4735 #ifdef TARGET_NR_creat /* not on alpha */
4736 case TARGET_NR_creat:
4737 if (!(p = lock_user_string(arg1)))
4738 goto efault;
4739 ret = get_errno(creat(p, arg2));
4740 unlock_user(p, arg1, 0);
4741 break;
4742 #endif
4743 case TARGET_NR_link:
4744 {
4745 void * p2;
4746 p = lock_user_string(arg1);
4747 p2 = lock_user_string(arg2);
4748 if (!p || !p2)
4749 ret = -TARGET_EFAULT;
4750 else
4751 ret = get_errno(link(p, p2));
4752 unlock_user(p2, arg2, 0);
4753 unlock_user(p, arg1, 0);
4754 }
4755 break;
4756 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4757 case TARGET_NR_linkat:
4758 {
4759 void * p2 = NULL;
4760 if (!arg2 || !arg4)
4761 goto efault;
4762 p = lock_user_string(arg2);
4763 p2 = lock_user_string(arg4);
4764 if (!p || !p2)
4765 ret = -TARGET_EFAULT;
4766 else
4767 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4768 unlock_user(p, arg2, 0);
4769 unlock_user(p2, arg4, 0);
4770 }
4771 break;
4772 #endif
4773 case TARGET_NR_unlink:
4774 if (!(p = lock_user_string(arg1)))
4775 goto efault;
4776 ret = get_errno(unlink(p));
4777 unlock_user(p, arg1, 0);
4778 break;
4779 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4780 case TARGET_NR_unlinkat:
4781 if (!(p = lock_user_string(arg2)))
4782 goto efault;
4783 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4784 unlock_user(p, arg2, 0);
4785 break;
4786 #endif
4787 case TARGET_NR_execve:
4788 {
4789 char **argp, **envp;
4790 int argc, envc;
4791 abi_ulong gp;
4792 abi_ulong guest_argp;
4793 abi_ulong guest_envp;
4794 abi_ulong addr;
4795 char **q;
4796
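/* Two passes over the guest argv/envp: first count the entries, then
   lock each guest string in place so that the host execve() below
   receives ordinary NULL-terminated arrays of host pointers. */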
4797 argc = 0;
4798 guest_argp = arg2;
4799 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4800 if (get_user_ual(addr, gp))
4801 goto efault;
4802 if (!addr)
4803 break;
4804 argc++;
4805 }
4806 envc = 0;
4807 guest_envp = arg3;
4808 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4809 if (get_user_ual(addr, gp))
4810 goto efault;
4811 if (!addr)
4812 break;
4813 envc++;
4814 }
4815
4816 argp = alloca((argc + 1) * sizeof(void *));
4817 envp = alloca((envc + 1) * sizeof(void *));
4818
4819 for (gp = guest_argp, q = argp; gp;
4820 gp += sizeof(abi_ulong), q++) {
4821 if (get_user_ual(addr, gp))
4822 goto execve_efault;
4823 if (!addr)
4824 break;
4825 if (!(*q = lock_user_string(addr)))
4826 goto execve_efault;
4827 }
4828 *q = NULL;
4829
4830 for (gp = guest_envp, q = envp; gp;
4831 gp += sizeof(abi_ulong), q++) {
4832 if (get_user_ual(addr, gp))
4833 goto execve_efault;
4834 if (!addr)
4835 break;
4836 if (!(*q = lock_user_string(addr)))
4837 goto execve_efault;
4838 }
4839 *q = NULL;
4840
4841 if (!(p = lock_user_string(arg1)))
4842 goto execve_efault;
4843 ret = get_errno(execve(p, argp, envp));
4844 unlock_user(p, arg1, 0);
4845
4846 goto execve_end;
4847
4848 execve_efault:
4849 ret = -TARGET_EFAULT;
4850
4851 execve_end:
4852 for (gp = guest_argp, q = argp; *q;
4853 gp += sizeof(abi_ulong), q++) {
4854 if (get_user_ual(addr, gp)
4855 || !addr)
4856 break;
4857 unlock_user(*q, addr, 0);
4858 }
4859 for (gp = guest_envp, q = envp; *q;
4860 gp += sizeof(abi_ulong), q++) {
4861 if (get_user_ual(addr, gp)
4862 || !addr)
4863 break;
4864 unlock_user(*q, addr, 0);
4865 }
4866 }
4867 break;
4868 case TARGET_NR_chdir:
4869 if (!(p = lock_user_string(arg1)))
4870 goto efault;
4871 ret = get_errno(chdir(p));
4872 unlock_user(p, arg1, 0);
4873 break;
4874 #ifdef TARGET_NR_time
4875 case TARGET_NR_time:
4876 {
4877 time_t host_time;
4878 ret = get_errno(time(&host_time));
4879 if (!is_error(ret)
4880 && arg1
4881 && put_user_sal(host_time, arg1))
4882 goto efault;
4883 }
4884 break;
4885 #endif
4886 case TARGET_NR_mknod:
4887 if (!(p = lock_user_string(arg1)))
4888 goto efault;
4889 ret = get_errno(mknod(p, arg2, arg3));
4890 unlock_user(p, arg1, 0);
4891 break;
4892 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4893 case TARGET_NR_mknodat:
4894 if (!(p = lock_user_string(arg2)))
4895 goto efault;
4896 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4897 unlock_user(p, arg2, 0);
4898 break;
4899 #endif
4900 case TARGET_NR_chmod:
4901 if (!(p = lock_user_string(arg1)))
4902 goto efault;
4903 ret = get_errno(chmod(p, arg2));
4904 unlock_user(p, arg1, 0);
4905 break;
4906 #ifdef TARGET_NR_break
4907 case TARGET_NR_break:
4908 goto unimplemented;
4909 #endif
4910 #ifdef TARGET_NR_oldstat
4911 case TARGET_NR_oldstat:
4912 goto unimplemented;
4913 #endif
4914 case TARGET_NR_lseek:
4915 ret = get_errno(lseek(arg1, arg2, arg3));
4916 break;
4917 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4918 /* Alpha specific */
4919 case TARGET_NR_getxpid:
4920 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
4921 ret = get_errno(getpid());
4922 break;
4923 #endif
4924 #ifdef TARGET_NR_getpid
4925 case TARGET_NR_getpid:
4926 ret = get_errno(getpid());
4927 break;
4928 #endif
4929 case TARGET_NR_mount:
4930 {
4931 /* need to look at the data field */
4932 void *p2, *p3;
4933 p = lock_user_string(arg1);
4934 p2 = lock_user_string(arg2);
4935 p3 = lock_user_string(arg3);
4936 if (!p || !p2 || !p3)
4937 ret = -TARGET_EFAULT;
4938 else {
4939 /* FIXME - arg5 should be locked, but it isn't clear how to
4940 * do that since it's not guaranteed to be a NULL-terminated
4941 * string.
4942 */
4943 if ( ! arg5 )
4944 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
4945 else
4946 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4947 }
4948 unlock_user(p, arg1, 0);
4949 unlock_user(p2, arg2, 0);
4950 unlock_user(p3, arg3, 0);
4951 break;
4952 }
4953 #ifdef TARGET_NR_umount
4954 case TARGET_NR_umount:
4955 if (!(p = lock_user_string(arg1)))
4956 goto efault;
4957 ret = get_errno(umount(p));
4958 unlock_user(p, arg1, 0);
4959 break;
4960 #endif
4961 #ifdef TARGET_NR_stime /* not on alpha */
4962 case TARGET_NR_stime:
4963 {
4964 time_t host_time;
4965 if (get_user_sal(host_time, arg1))
4966 goto efault;
4967 ret = get_errno(stime(&host_time));
4968 }
4969 break;
4970 #endif
4971 case TARGET_NR_ptrace:
4972 goto unimplemented;
4973 #ifdef TARGET_NR_alarm /* not on alpha */
4974 case TARGET_NR_alarm:
4975 ret = alarm(arg1);
4976 break;
4977 #endif
4978 #ifdef TARGET_NR_oldfstat
4979 case TARGET_NR_oldfstat:
4980 goto unimplemented;
4981 #endif
4982 #ifdef TARGET_NR_pause /* not on alpha */
4983 case TARGET_NR_pause:
4984 ret = get_errno(pause());
4985 break;
4986 #endif
4987 #ifdef TARGET_NR_utime
4988 case TARGET_NR_utime:
4989 {
4990 struct utimbuf tbuf, *host_tbuf;
4991 struct target_utimbuf *target_tbuf;
4992 if (arg2) {
4993 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4994 goto efault;
4995 tbuf.actime = tswapl(target_tbuf->actime);
4996 tbuf.modtime = tswapl(target_tbuf->modtime);
4997 unlock_user_struct(target_tbuf, arg2, 0);
4998 host_tbuf = &tbuf;
4999 } else {
5000 host_tbuf = NULL;
5001 }
5002 if (!(p = lock_user_string(arg1)))
5003 goto efault;
5004 ret = get_errno(utime(p, host_tbuf));
5005 unlock_user(p, arg1, 0);
5006 }
5007 break;
5008 #endif
5009 case TARGET_NR_utimes:
5010 {
5011 struct timeval *tvp, tv[2];
5012 if (arg2) {
5013 if (copy_from_user_timeval(&tv[0], arg2)
5014 || copy_from_user_timeval(&tv[1],
5015 arg2 + sizeof(struct target_timeval)))
5016 goto efault;
5017 tvp = tv;
5018 } else {
5019 tvp = NULL;
5020 }
5021 if (!(p = lock_user_string(arg1)))
5022 goto efault;
5023 ret = get_errno(utimes(p, tvp));
5024 unlock_user(p, arg1, 0);
5025 }
5026 break;
5027 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5028 case TARGET_NR_futimesat:
5029 {
5030 struct timeval *tvp, tv[2];
5031 if (arg3) {
5032 if (copy_from_user_timeval(&tv[0], arg3)
5033 || copy_from_user_timeval(&tv[1],
5034 arg3 + sizeof(struct target_timeval)))
5035 goto efault;
5036 tvp = tv;
5037 } else {
5038 tvp = NULL;
5039 }
5040 if (!(p = lock_user_string(arg2)))
5041 goto efault;
5042 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5043 unlock_user(p, arg2, 0);
5044 }
5045 break;
5046 #endif
5047 #ifdef TARGET_NR_stty
5048 case TARGET_NR_stty:
5049 goto unimplemented;
5050 #endif
5051 #ifdef TARGET_NR_gtty
5052 case TARGET_NR_gtty:
5053 goto unimplemented;
5054 #endif
5055 case TARGET_NR_access:
5056 if (!(p = lock_user_string(arg1)))
5057 goto efault;
5058 ret = get_errno(access(path(p), arg2));
5059 unlock_user(p, arg1, 0);
5060 break;
5061 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5062 case TARGET_NR_faccessat:
5063 if (!(p = lock_user_string(arg2)))
5064 goto efault;
5065 ret = get_errno(sys_faccessat(arg1, p, arg3));
5066 unlock_user(p, arg2, 0);
5067 break;
5068 #endif
5069 #ifdef TARGET_NR_nice /* not on alpha */
5070 case TARGET_NR_nice:
5071 ret = get_errno(nice(arg1));
5072 break;
5073 #endif
5074 #ifdef TARGET_NR_ftime
5075 case TARGET_NR_ftime:
5076 goto unimplemented;
5077 #endif
5078 case TARGET_NR_sync:
5079 sync();
5080 ret = 0;
5081 break;
5082 case TARGET_NR_kill:
5083 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5084 break;
5085 case TARGET_NR_rename:
5086 {
5087 void *p2;
5088 p = lock_user_string(arg1);
5089 p2 = lock_user_string(arg2);
5090 if (!p || !p2)
5091 ret = -TARGET_EFAULT;
5092 else
5093 ret = get_errno(rename(p, p2));
5094 unlock_user(p2, arg2, 0);
5095 unlock_user(p, arg1, 0);
5096 }
5097 break;
5098 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5099 case TARGET_NR_renameat:
5100 {
5101 void *p2;
5102 p = lock_user_string(arg2);
5103 p2 = lock_user_string(arg4);
5104 if (!p || !p2)
5105 ret = -TARGET_EFAULT;
5106 else
5107 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5108 unlock_user(p2, arg4, 0);
5109 unlock_user(p, arg2, 0);
5110 }
5111 break;
5112 #endif
5113 case TARGET_NR_mkdir:
5114 if (!(p = lock_user_string(arg1)))
5115 goto efault;
5116 ret = get_errno(mkdir(p, arg2));
5117 unlock_user(p, arg1, 0);
5118 break;
5119 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5120 case TARGET_NR_mkdirat:
5121 if (!(p = lock_user_string(arg2)))
5122 goto efault;
5123 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5124 unlock_user(p, arg2, 0);
5125 break;
5126 #endif
5127 case TARGET_NR_rmdir:
5128 if (!(p = lock_user_string(arg1)))
5129 goto efault;
5130 ret = get_errno(rmdir(p));
5131 unlock_user(p, arg1, 0);
5132 break;
5133 case TARGET_NR_dup:
5134 ret = get_errno(dup(arg1));
5135 break;
5136 case TARGET_NR_pipe:
5137 ret = do_pipe(cpu_env, arg1, 0, 0);
5138 break;
5139 #ifdef TARGET_NR_pipe2
5140 case TARGET_NR_pipe2:
5141 ret = do_pipe(cpu_env, arg1, arg2, 1);
5142 break;
5143 #endif
5144 case TARGET_NR_times:
5145 {
5146 struct target_tms *tmsp;
5147 struct tms tms;
5148 ret = get_errno(times(&tms));
5149 if (arg1) {
5150 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5151 if (!tmsp)
5152 goto efault;
5153 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
5154 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
5155 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
5156 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
5157 }
5158 if (!is_error(ret))
5159 ret = host_to_target_clock_t(ret);
5160 }
5161 break;
5162 #ifdef TARGET_NR_prof
5163 case TARGET_NR_prof:
5164 goto unimplemented;
5165 #endif
5166 #ifdef TARGET_NR_signal
5167 case TARGET_NR_signal:
5168 goto unimplemented;
5169 #endif
5170 case TARGET_NR_acct:
5171 if (arg1 == 0) {
5172 ret = get_errno(acct(NULL));
5173 } else {
5174 if (!(p = lock_user_string(arg1)))
5175 goto efault;
5176 ret = get_errno(acct(path(p)));
5177 unlock_user(p, arg1, 0);
5178 }
5179 break;
5180 #ifdef TARGET_NR_umount2 /* not on alpha */
5181 case TARGET_NR_umount2:
5182 if (!(p = lock_user_string(arg1)))
5183 goto efault;
5184 ret = get_errno(umount2(p, arg2));
5185 unlock_user(p, arg1, 0);
5186 break;
5187 #endif
5188 #ifdef TARGET_NR_lock
5189 case TARGET_NR_lock:
5190 goto unimplemented;
5191 #endif
5192 case TARGET_NR_ioctl:
5193 ret = do_ioctl(arg1, arg2, arg3);
5194 break;
5195 case TARGET_NR_fcntl:
5196 ret = do_fcntl(arg1, arg2, arg3);
5197 break;
5198 #ifdef TARGET_NR_mpx
5199 case TARGET_NR_mpx:
5200 goto unimplemented;
5201 #endif
5202 case TARGET_NR_setpgid:
5203 ret = get_errno(setpgid(arg1, arg2));
5204 break;
5205 #ifdef TARGET_NR_ulimit
5206 case TARGET_NR_ulimit:
5207 goto unimplemented;
5208 #endif
5209 #ifdef TARGET_NR_oldolduname
5210 case TARGET_NR_oldolduname:
5211 goto unimplemented;
5212 #endif
5213 case TARGET_NR_umask:
5214 ret = get_errno(umask(arg1));
5215 break;
5216 case TARGET_NR_chroot:
5217 if (!(p = lock_user_string(arg1)))
5218 goto efault;
5219 ret = get_errno(chroot(p));
5220 unlock_user(p, arg1, 0);
5221 break;
5222 case TARGET_NR_ustat:
5223 goto unimplemented;
5224 case TARGET_NR_dup2:
5225 ret = get_errno(dup2(arg1, arg2));
5226 break;
5227 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5228 case TARGET_NR_dup3:
5229 ret = get_errno(dup3(arg1, arg2, arg3));
5230 break;
5231 #endif
5232 #ifdef TARGET_NR_getppid /* not on alpha */
5233 case TARGET_NR_getppid:
5234 ret = get_errno(getppid());
5235 break;
5236 #endif
5237 case TARGET_NR_getpgrp:
5238 ret = get_errno(getpgrp());
5239 break;
5240 case TARGET_NR_setsid:
5241 ret = get_errno(setsid());
5242 break;
5243 #ifdef TARGET_NR_sigaction
5244 case TARGET_NR_sigaction:
5245 {
5246 #if defined(TARGET_ALPHA)
5247 struct target_sigaction act, oact, *pact = 0;
5248 struct target_old_sigaction *old_act;
5249 if (arg2) {
5250 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5251 goto efault;
5252 act._sa_handler = old_act->_sa_handler;
5253 target_siginitset(&act.sa_mask, old_act->sa_mask);
5254 act.sa_flags = old_act->sa_flags;
5255 act.sa_restorer = 0;
5256 unlock_user_struct(old_act, arg2, 0);
5257 pact = &act;
5258 }
5259 ret = get_errno(do_sigaction(arg1, pact, &oact));
5260 if (!is_error(ret) && arg3) {
5261 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5262 goto efault;
5263 old_act->_sa_handler = oact._sa_handler;
5264 old_act->sa_mask = oact.sa_mask.sig[0];
5265 old_act->sa_flags = oact.sa_flags;
5266 unlock_user_struct(old_act, arg3, 1);
5267 }
5268 #elif defined(TARGET_MIPS)
5269 struct target_sigaction act, oact, *pact, *old_act;
5270
5271 if (arg2) {
5272 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5273 goto efault;
5274 act._sa_handler = old_act->_sa_handler;
5275 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5276 act.sa_flags = old_act->sa_flags;
5277 unlock_user_struct(old_act, arg2, 0);
5278 pact = &act;
5279 } else {
5280 pact = NULL;
5281 }
5282
5283 ret = get_errno(do_sigaction(arg1, pact, &oact));
5284
5285 if (!is_error(ret) && arg3) {
5286 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5287 goto efault;
5288 old_act->_sa_handler = oact._sa_handler;
5289 old_act->sa_flags = oact.sa_flags;
5290 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5291 old_act->sa_mask.sig[1] = 0;
5292 old_act->sa_mask.sig[2] = 0;
5293 old_act->sa_mask.sig[3] = 0;
5294 unlock_user_struct(old_act, arg3, 1);
5295 }
5296 #else
5297 struct target_old_sigaction *old_act;
5298 struct target_sigaction act, oact, *pact;
5299 if (arg2) {
5300 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5301 goto efault;
5302 act._sa_handler = old_act->_sa_handler;
5303 target_siginitset(&act.sa_mask, old_act->sa_mask);
5304 act.sa_flags = old_act->sa_flags;
5305 act.sa_restorer = old_act->sa_restorer;
5306 unlock_user_struct(old_act, arg2, 0);
5307 pact = &act;
5308 } else {
5309 pact = NULL;
5310 }
5311 ret = get_errno(do_sigaction(arg1, pact, &oact));
5312 if (!is_error(ret) && arg3) {
5313 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5314 goto efault;
5315 old_act->_sa_handler = oact._sa_handler;
5316 old_act->sa_mask = oact.sa_mask.sig[0];
5317 old_act->sa_flags = oact.sa_flags;
5318 old_act->sa_restorer = oact.sa_restorer;
5319 unlock_user_struct(old_act, arg3, 1);
5320 }
5321 #endif
5322 }
5323 break;
5324 #endif
5325 case TARGET_NR_rt_sigaction:
5326 {
5327 #if defined(TARGET_ALPHA)
5328 struct target_sigaction act, oact, *pact = 0;
5329 struct target_rt_sigaction *rt_act;
5330 /* ??? arg4 == sizeof(sigset_t). */
5331 if (arg2) {
5332 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5333 goto efault;
5334 act._sa_handler = rt_act->_sa_handler;
5335 act.sa_mask = rt_act->sa_mask;
5336 act.sa_flags = rt_act->sa_flags;
5337 act.sa_restorer = arg5;
5338 unlock_user_struct(rt_act, arg2, 0);
5339 pact = &act;
5340 }
5341 ret = get_errno(do_sigaction(arg1, pact, &oact));
5342 if (!is_error(ret) && arg3) {
5343 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5344 goto efault;
5345 rt_act->_sa_handler = oact._sa_handler;
5346 rt_act->sa_mask = oact.sa_mask;
5347 rt_act->sa_flags = oact.sa_flags;
5348 unlock_user_struct(rt_act, arg3, 1);
5349 }
5350 #else
5351 struct target_sigaction *act;
5352 struct target_sigaction *oact;
5353
5354 if (arg2) {
5355 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5356 goto efault;
5357 } else
5358 act = NULL;
5359 if (arg3) {
5360 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5361 ret = -TARGET_EFAULT;
5362 goto rt_sigaction_fail;
5363 }
5364 } else
5365 oact = NULL;
5366 ret = get_errno(do_sigaction(arg1, act, oact));
5367 rt_sigaction_fail:
5368 if (act)
5369 unlock_user_struct(act, arg2, 0);
5370 if (oact)
5371 unlock_user_struct(oact, arg3, 1);
5372 #endif
5373 }
5374 break;
5375 #ifdef TARGET_NR_sgetmask /* not on alpha */
5376 case TARGET_NR_sgetmask:
5377 {
5378 sigset_t cur_set;
5379 abi_ulong target_set;
5380 sigprocmask(0, NULL, &cur_set);
5381 host_to_target_old_sigset(&target_set, &cur_set);
5382 ret = target_set;
5383 }
5384 break;
5385 #endif
5386 #ifdef TARGET_NR_ssetmask /* not on alpha */
5387 case TARGET_NR_ssetmask:
5388 {
5389 sigset_t set, oset, cur_set;
5390 abi_ulong target_set = arg1;
5391 sigprocmask(0, NULL, &cur_set);
5392 target_to_host_old_sigset(&set, &target_set);
5393 sigorset(&set, &set, &cur_set);
5394 sigprocmask(SIG_SETMASK, &set, &oset);
5395 host_to_target_old_sigset(&target_set, &oset);
5396 ret = target_set;
5397 }
5398 break;
5399 #endif
5400 #ifdef TARGET_NR_sigprocmask
5401 case TARGET_NR_sigprocmask:
5402 {
5403 #if defined(TARGET_ALPHA)
5404 sigset_t set, oldset;
5405 abi_ulong mask;
5406 int how;
5407
5408 switch (arg1) {
5409 case TARGET_SIG_BLOCK:
5410 how = SIG_BLOCK;
5411 break;
5412 case TARGET_SIG_UNBLOCK:
5413 how = SIG_UNBLOCK;
5414 break;
5415 case TARGET_SIG_SETMASK:
5416 how = SIG_SETMASK;
5417 break;
5418 default:
5419 ret = -TARGET_EINVAL;
5420 goto fail;
5421 }
5422 mask = arg2;
5423 target_to_host_old_sigset(&set, &mask);
5424
5425 ret = get_errno(sigprocmask(how, &set, &oldset));
5426
5427 if (!is_error(ret)) {
5428 host_to_target_old_sigset(&mask, &oldset);
5429 ret = mask;
5430 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5431 }
5432 #else
5433 sigset_t set, oldset, *set_ptr;
5434 int how;
5435
5436 if (arg2) {
5437 switch (arg1) {
5438 case TARGET_SIG_BLOCK:
5439 how = SIG_BLOCK;
5440 break;
5441 case TARGET_SIG_UNBLOCK:
5442 how = SIG_UNBLOCK;
5443 break;
5444 case TARGET_SIG_SETMASK:
5445 how = SIG_SETMASK;
5446 break;
5447 default:
5448 ret = -TARGET_EINVAL;
5449 goto fail;
5450 }
5451 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5452 goto efault;
5453 target_to_host_old_sigset(&set, p);
5454 unlock_user(p, arg2, 0);
5455 set_ptr = &set;
5456 } else {
5457 how = 0;
5458 set_ptr = NULL;
5459 }
5460 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5461 if (!is_error(ret) && arg3) {
5462 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5463 goto efault;
5464 host_to_target_old_sigset(p, &oldset);
5465 unlock_user(p, arg3, sizeof(target_sigset_t));
5466 }
5467 #endif
5468 }
5469 break;
5470 #endif
5471 case TARGET_NR_rt_sigprocmask:
5472 {
5473 int how = arg1;
5474 sigset_t set, oldset, *set_ptr;
5475
5476 if (arg2) {
5477 switch(how) {
5478 case TARGET_SIG_BLOCK:
5479 how = SIG_BLOCK;
5480 break;
5481 case TARGET_SIG_UNBLOCK:
5482 how = SIG_UNBLOCK;
5483 break;
5484 case TARGET_SIG_SETMASK:
5485 how = SIG_SETMASK;
5486 break;
5487 default:
5488 ret = -TARGET_EINVAL;
5489 goto fail;
5490 }
5491 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5492 goto efault;
5493 target_to_host_sigset(&set, p);
5494 unlock_user(p, arg2, 0);
5495 set_ptr = &set;
5496 } else {
5497 how = 0;
5498 set_ptr = NULL;
5499 }
5500 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5501 if (!is_error(ret) && arg3) {
5502 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5503 goto efault;
5504 host_to_target_sigset(p, &oldset);
5505 unlock_user(p, arg3, sizeof(target_sigset_t));
5506 }
5507 }
5508 break;
5509 #ifdef TARGET_NR_sigpending
5510 case TARGET_NR_sigpending:
5511 {
5512 sigset_t set;
5513 ret = get_errno(sigpending(&set));
5514 if (!is_error(ret)) {
5515 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5516 goto efault;
5517 host_to_target_old_sigset(p, &set);
5518 unlock_user(p, arg1, sizeof(target_sigset_t));
5519 }
5520 }
5521 break;
5522 #endif
5523 case TARGET_NR_rt_sigpending:
5524 {
5525 sigset_t set;
5526 ret = get_errno(sigpending(&set));
5527 if (!is_error(ret)) {
5528 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5529 goto efault;
5530 host_to_target_sigset(p, &set);
5531 unlock_user(p, arg1, sizeof(target_sigset_t));
5532 }
5533 }
5534 break;
5535 #ifdef TARGET_NR_sigsuspend
5536 case TARGET_NR_sigsuspend:
5537 {
5538 sigset_t set;
5539 #if defined(TARGET_ALPHA)
5540 abi_ulong mask = arg1;
5541 target_to_host_old_sigset(&set, &mask);
5542 #else
5543 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5544 goto efault;
5545 target_to_host_old_sigset(&set, p);
5546 unlock_user(p, arg1, 0);
5547 #endif
5548 ret = get_errno(sigsuspend(&set));
5549 }
5550 break;
5551 #endif
5552 case TARGET_NR_rt_sigsuspend:
5553 {
5554 sigset_t set;
5555 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5556 goto efault;
5557 target_to_host_sigset(&set, p);
5558 unlock_user(p, arg1, 0);
5559 ret = get_errno(sigsuspend(&set));
5560 }
5561 break;
5562 case TARGET_NR_rt_sigtimedwait:
5563 {
5564 sigset_t set;
5565 struct timespec uts, *puts;
5566 siginfo_t uinfo;
5567
5568 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5569 goto efault;
5570 target_to_host_sigset(&set, p);
5571 unlock_user(p, arg1, 0);
5572 if (arg3) {
5573 puts = &uts;
5574 target_to_host_timespec(puts, arg3);
5575 } else {
5576 puts = NULL;
5577 }
5578 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5579 if (!is_error(ret) && arg2) {
5580 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5581 goto efault;
5582 host_to_target_siginfo(p, &uinfo);
5583 unlock_user(p, arg2, sizeof(target_siginfo_t));
5584 }
5585 }
5586 break;
5587 case TARGET_NR_rt_sigqueueinfo:
5588 {
5589 siginfo_t uinfo;
5590 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
5591 goto efault;
5592 target_to_host_siginfo(&uinfo, p);
5593 unlock_user(p, arg3, 0);
5594 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5595 }
5596 break;
5597 #ifdef TARGET_NR_sigreturn
5598 case TARGET_NR_sigreturn:
5599 /* NOTE: ret is eax, so no transcoding needs to be done */
5600 ret = do_sigreturn(cpu_env);
5601 break;
5602 #endif
5603 case TARGET_NR_rt_sigreturn:
5604 /* NOTE: ret is eax, so no transcoding needs to be done */
5605 ret = do_rt_sigreturn(cpu_env);
5606 break;
5607 case TARGET_NR_sethostname:
5608 if (!(p = lock_user_string(arg1)))
5609 goto efault;
5610 ret = get_errno(sethostname(p, arg2));
5611 unlock_user(p, arg1, 0);
5612 break;
5613 case TARGET_NR_setrlimit:
5614 {
5615 int resource = target_to_host_resource(arg1);
5616 struct target_rlimit *target_rlim;
5617 struct rlimit rlim;
5618 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5619 goto efault;
5620 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5621 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5622 unlock_user_struct(target_rlim, arg2, 0);
5623 ret = get_errno(setrlimit(resource, &rlim));
5624 }
5625 break;
5626 case TARGET_NR_getrlimit:
5627 {
5628 int resource = target_to_host_resource(arg1);
5629 struct target_rlimit *target_rlim;
5630 struct rlimit rlim;
5631
5632 ret = get_errno(getrlimit(resource, &rlim));
5633 if (!is_error(ret)) {
5634 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5635 goto efault;
5636 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5637 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5638 unlock_user_struct(target_rlim, arg2, 1);
5639 }
5640 }
5641 break;
5642 case TARGET_NR_getrusage:
5643 {
5644 struct rusage rusage;
5645 ret = get_errno(getrusage(arg1, &rusage));
5646 if (!is_error(ret)) {
5647 host_to_target_rusage(arg2, &rusage);
5648 }
5649 }
5650 break;
5651 case TARGET_NR_gettimeofday:
5652 {
5653 struct timeval tv;
5654 ret = get_errno(gettimeofday(&tv, NULL));
5655 if (!is_error(ret)) {
5656 if (copy_to_user_timeval(arg1, &tv))
5657 goto efault;
5658 }
5659 }
5660 break;
5661 case TARGET_NR_settimeofday:
5662 {
5663 struct timeval tv;
5664 if (copy_from_user_timeval(&tv, arg1))
5665 goto efault;
5666 ret = get_errno(settimeofday(&tv, NULL));
5667 }
5668 break;
5669 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5670 case TARGET_NR_select:
5671 {
5672 struct target_sel_arg_struct *sel;
5673 abi_ulong inp, outp, exp, tvp;
5674 long nsel;
5675
5676 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5677 goto efault;
5678 nsel = tswapl(sel->n);
5679 inp = tswapl(sel->inp);
5680 outp = tswapl(sel->outp);
5681 exp = tswapl(sel->exp);
5682 tvp = tswapl(sel->tvp);
5683 unlock_user_struct(sel, arg1, 0);
5684 ret = do_select(nsel, inp, outp, exp, tvp);
5685 }
5686 break;
5687 #endif
5688 #ifdef TARGET_NR_pselect6
5689 case TARGET_NR_pselect6:
5690 {
5691 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
5692 fd_set rfds, wfds, efds;
5693 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
5694 struct timespec ts, *ts_ptr;
5695
5696 /*
5697 * The 6th arg is actually two args smashed together,
5698 * so we cannot use the C library.
5699 */
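/* The packed 6th argument is a guest pointer/size pair: arg7[0] is the
   address of the sigset (may be 0) and arg7[1] its size; the sigset,
   when present, is converted with target_to_host_sigset() below. */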
5700 sigset_t set;
5701 struct {
5702 sigset_t *set;
5703 size_t size;
5704 } sig, *sig_ptr;
5705
5706 abi_ulong arg_sigset, arg_sigsize, *arg7;
5707 target_sigset_t *target_sigset;
5708
5709 n = arg1;
5710 rfd_addr = arg2;
5711 wfd_addr = arg3;
5712 efd_addr = arg4;
5713 ts_addr = arg5;
5714
5715 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
5716 if (ret) {
5717 goto fail;
5718 }
5719 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
5720 if (ret) {
5721 goto fail;
5722 }
5723 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
5724 if (ret) {
5725 goto fail;
5726 }
5727
5728 /*
5729 * This takes a timespec, and not a timeval, so we cannot
5730 * use the do_select() helper ...
5731 */
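/*
 * Unlike the glibc wrapper, the raw pselect6 syscall may update the
 * timespec with the time remaining, which is why it is copied back to
 * the guest after the call below.
 */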
5732 if (ts_addr) {
5733 if (target_to_host_timespec(&ts, ts_addr)) {
5734 goto efault;
5735 }
5736 ts_ptr = &ts;
5737 } else {
5738 ts_ptr = NULL;
5739 }
5740
5741 /* Extract the two packed args for the sigset */
5742 if (arg6) {
5743 sig_ptr = &sig;
5744 sig.size = _NSIG / 8;
5745
5746 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
5747 if (!arg7) {
5748 goto efault;
5749 }
5750 arg_sigset = tswapl(arg7[0]);
5751 arg_sigsize = tswapl(arg7[1]);
5752 unlock_user(arg7, arg6, 0);
5753
5754 if (arg_sigset) {
5755 sig.set = &set;
5756 if (arg_sigsize != sizeof(*target_sigset)) {
5757 /* Like the kernel, we enforce correct size sigsets */
5758 ret = -TARGET_EINVAL;
5759 goto fail;
5760 }
5761 target_sigset = lock_user(VERIFY_READ, arg_sigset,
5762 sizeof(*target_sigset), 1);
5763 if (!target_sigset) {
5764 goto efault;
5765 }
5766 target_to_host_sigset(&set, target_sigset);
5767 unlock_user(target_sigset, arg_sigset, 0);
5768 } else {
5769 sig.set = NULL;
5770 }
5771 } else {
5772 sig_ptr = NULL;
5773 }
5774
5775 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
5776 ts_ptr, sig_ptr));
5777
5778 if (!is_error(ret)) {
5779 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
5780 goto efault;
5781 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
5782 goto efault;
5783 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
5784 goto efault;
5785
5786 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
5787 goto efault;
5788 }
5789 }
5790 break;
5791 #endif
5792 case TARGET_NR_symlink:
5793 {
5794 void *p2;
5795 p = lock_user_string(arg1);
5796 p2 = lock_user_string(arg2);
5797 if (!p || !p2)
5798 ret = -TARGET_EFAULT;
5799 else
5800 ret = get_errno(symlink(p, p2));
5801 unlock_user(p2, arg2, 0);
5802 unlock_user(p, arg1, 0);
5803 }
5804 break;
5805 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5806 case TARGET_NR_symlinkat:
5807 {
5808 void *p2;
5809 p = lock_user_string(arg1);
5810 p2 = lock_user_string(arg3);
5811 if (!p || !p2)
5812 ret = -TARGET_EFAULT;
5813 else
5814 ret = get_errno(sys_symlinkat(p, arg2, p2));
5815 unlock_user(p2, arg3, 0);
5816 unlock_user(p, arg1, 0);
5817 }
5818 break;
5819 #endif
5820 #ifdef TARGET_NR_oldlstat
5821 case TARGET_NR_oldlstat:
5822 goto unimplemented;
5823 #endif
5824 case TARGET_NR_readlink:
5825 {
5826 void *p2, *temp;
5827 p = lock_user_string(arg1);
5828 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5829 if (!p || !p2)
5830 ret = -TARGET_EFAULT;
5831 else {
5832 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5833 char real[PATH_MAX];
5834 temp = realpath(exec_path, real);
5835 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
5836 snprintf((char *)p2, arg3, "%s", real);
5837 }
5838 else
5839 ret = get_errno(readlink(path(p), p2, arg3));
5840 }
5841 unlock_user(p2, arg2, ret);
5842 unlock_user(p, arg1, 0);
5843 }
5844 break;
5845 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5846 case TARGET_NR_readlinkat:
5847 {
5848 void *p2;
5849 p = lock_user_string(arg2);
5850 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5851 if (!p || !p2)
5852 ret = -TARGET_EFAULT;
5853 else
5854 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5855 unlock_user(p2, arg3, ret);
5856 unlock_user(p, arg2, 0);
5857 }
5858 break;
5859 #endif
5860 #ifdef TARGET_NR_uselib
5861 case TARGET_NR_uselib:
5862 goto unimplemented;
5863 #endif
5864 #ifdef TARGET_NR_swapon
5865 case TARGET_NR_swapon:
5866 if (!(p = lock_user_string(arg1)))
5867 goto efault;
5868 ret = get_errno(swapon(p, arg2));
5869 unlock_user(p, arg1, 0);
5870 break;
5871 #endif
5872 case TARGET_NR_reboot:
5873 goto unimplemented;
5874 #ifdef TARGET_NR_readdir
5875 case TARGET_NR_readdir:
5876 goto unimplemented;
5877 #endif
5878 #ifdef TARGET_NR_mmap
5879 case TARGET_NR_mmap:
5880 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5881 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5882 || defined(TARGET_S390X)
5883 {
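/*
 * On these targets the old-style mmap takes a single guest pointer to
 * a block of six arguments rather than passing them in registers, so
 * the six values are fetched and byte-swapped here before calling
 * target_mmap().
 */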
5884 abi_ulong *v;
5885 abi_ulong v1, v2, v3, v4, v5, v6;
5886 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5887 goto efault;
5888 v1 = tswapl(v[0]);
5889 v2 = tswapl(v[1]);
5890 v3 = tswapl(v[2]);
5891 v4 = tswapl(v[3]);
5892 v5 = tswapl(v[4]);
5893 v6 = tswapl(v[5]);
5894 unlock_user(v, arg1, 0);
5895 ret = get_errno(target_mmap(v1, v2, v3,
5896 target_to_host_bitmask(v4, mmap_flags_tbl),
5897 v5, v6));
5898 }
5899 #else
5900 ret = get_errno(target_mmap(arg1, arg2, arg3,
5901 target_to_host_bitmask(arg4, mmap_flags_tbl),
5902 arg5,
5903 arg6));
5904 #endif
5905 break;
5906 #endif
5907 #ifdef TARGET_NR_mmap2
5908 case TARGET_NR_mmap2:
5909 #ifndef MMAP_SHIFT
5910 #define MMAP_SHIFT 12
5911 #endif
5912 ret = get_errno(target_mmap(arg1, arg2, arg3,
5913 target_to_host_bitmask(arg4, mmap_flags_tbl),
5914 arg5,
5915 arg6 << MMAP_SHIFT));
5916 break;
5917 #endif
5918 case TARGET_NR_munmap:
5919 ret = get_errno(target_munmap(arg1, arg2));
5920 break;
5921 case TARGET_NR_mprotect:
5922 {
5923 TaskState *ts = ((CPUState *)cpu_env)->opaque;
5924 /* Special hack to detect libc making the stack executable. */
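/*
 * PROT_GROWSDOWN asks the kernel to extend the protection change down
 * to the start of the grows-down (stack) mapping. target_mprotect()
 * knows nothing about grows-down mappings, so when the address lies
 * within the recorded guest stack the flag is dropped and the range is
 * widened by hand to start at the stack limit instead.
 */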
5925 if ((arg3 & PROT_GROWSDOWN)
5926 && arg1 >= ts->info->stack_limit
5927 && arg1 <= ts->info->start_stack) {
5928 arg3 &= ~PROT_GROWSDOWN;
5929 arg2 = arg2 + arg1 - ts->info->stack_limit;
5930 arg1 = ts->info->stack_limit;
5931 }
5932 }
5933 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5934 break;
5935 #ifdef TARGET_NR_mremap
5936 case TARGET_NR_mremap:
5937 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5938 break;
5939 #endif
5940 /* ??? msync/mlock/munlock are broken for softmmu. */
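/*
 * These act directly on the host view of guest memory: g2h() is the
 * simple guest->host address translation used by user-mode emulation,
 * which is presumably why the note above flags them as broken when a
 * software MMU is in use.
 */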
5941 #ifdef TARGET_NR_msync
5942 case TARGET_NR_msync:
5943 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5944 break;
5945 #endif
5946 #ifdef TARGET_NR_mlock
5947 case TARGET_NR_mlock:
5948 ret = get_errno(mlock(g2h(arg1), arg2));
5949 break;
5950 #endif
5951 #ifdef TARGET_NR_munlock
5952 case TARGET_NR_munlock:
5953 ret = get_errno(munlock(g2h(arg1), arg2));
5954 break;
5955 #endif
5956 #ifdef TARGET_NR_mlockall
5957 case TARGET_NR_mlockall:
5958 ret = get_errno(mlockall(arg1));
5959 break;
5960 #endif
5961 #ifdef TARGET_NR_munlockall
5962 case TARGET_NR_munlockall:
5963 ret = get_errno(munlockall());
5964 break;
5965 #endif
5966 case TARGET_NR_truncate:
5967 if (!(p = lock_user_string(arg1)))
5968 goto efault;
5969 ret = get_errno(truncate(p, arg2));
5970 unlock_user(p, arg1, 0);
5971 break;
5972 case TARGET_NR_ftruncate:
5973 ret = get_errno(ftruncate(arg1, arg2));
5974 break;
5975 case TARGET_NR_fchmod:
5976 ret = get_errno(fchmod(arg1, arg2));
5977 break;
5978 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5979 case TARGET_NR_fchmodat:
5980 if (!(p = lock_user_string(arg2)))
5981 goto efault;
5982 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5983 unlock_user(p, arg2, 0);
5984 break;
5985 #endif
5986 case TARGET_NR_getpriority:
5987 /* libc does special remapping of the return value of
5988 * sys_getpriority() so it's just easiest to call
5989 * sys_getpriority() directly rather than through libc. */
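/* The kernel returns the nice value biased by 20 (i.e. 20 - nice, always
 * positive) so that errors can be told apart from results; the guest's
 * own libc is expected to undo that bias, so the raw kernel value is
 * what must be handed back here. */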
5990 ret = get_errno(sys_getpriority(arg1, arg2));
5991 break;
5992 case TARGET_NR_setpriority:
5993 ret = get_errno(setpriority(arg1, arg2, arg3));
5994 break;
5995 #ifdef TARGET_NR_profil
5996 case TARGET_NR_profil:
5997 goto unimplemented;
5998 #endif
5999 case TARGET_NR_statfs:
6000 if (!(p = lock_user_string(arg1)))
6001 goto efault;
6002 ret = get_errno(statfs(path(p), &stfs));
6003 unlock_user(p, arg1, 0);
6004 convert_statfs:
6005 if (!is_error(ret)) {
6006 struct target_statfs *target_stfs;
6007
6008 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6009 goto efault;
6010 __put_user(stfs.f_type, &target_stfs->f_type);
6011 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6012 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6013 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6014 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6015 __put_user(stfs.f_files, &target_stfs->f_files);
6016 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6017 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6018 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6019 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6020 unlock_user_struct(target_stfs, arg2, 1);
6021 }
6022 break;
6023 case TARGET_NR_fstatfs:
6024 ret = get_errno(fstatfs(arg1, &stfs));
6025 goto convert_statfs;
6026 #ifdef TARGET_NR_statfs64
6027 case TARGET_NR_statfs64:
6028 if (!(p = lock_user_string(arg1)))
6029 goto efault;
6030 ret = get_errno(statfs(path(p), &stfs));
6031 unlock_user(p, arg1, 0);
6032 convert_statfs64:
6033 if (!is_error(ret)) {
6034 struct target_statfs64 *target_stfs;
6035
6036 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6037 goto efault;
6038 __put_user(stfs.f_type, &target_stfs->f_type);
6039 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6040 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6041 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6042 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6043 __put_user(stfs.f_files, &target_stfs->f_files);
6044 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6045 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6046 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6047 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6048 unlock_user_struct(target_stfs, arg3, 1);
6049 }
6050 break;
6051 case TARGET_NR_fstatfs64:
6052 ret = get_errno(fstatfs(arg1, &stfs));
6053 goto convert_statfs64;
6054 #endif
6055 #ifdef TARGET_NR_ioperm
6056 case TARGET_NR_ioperm:
6057 goto unimplemented;
6058 #endif
6059 #ifdef TARGET_NR_socketcall
6060 case TARGET_NR_socketcall:
6061 ret = do_socketcall(arg1, arg2);
6062 break;
6063 #endif
6064 #ifdef TARGET_NR_accept
6065 case TARGET_NR_accept:
6066 ret = do_accept(arg1, arg2, arg3);
6067 break;
6068 #endif
6069 #ifdef TARGET_NR_bind
6070 case TARGET_NR_bind:
6071 ret = do_bind(arg1, arg2, arg3);
6072 break;
6073 #endif
6074 #ifdef TARGET_NR_connect
6075 case TARGET_NR_connect:
6076 ret = do_connect(arg1, arg2, arg3);
6077 break;
6078 #endif
6079 #ifdef TARGET_NR_getpeername
6080 case TARGET_NR_getpeername:
6081 ret = do_getpeername(arg1, arg2, arg3);
6082 break;
6083 #endif
6084 #ifdef TARGET_NR_getsockname
6085 case TARGET_NR_getsockname:
6086 ret = do_getsockname(arg1, arg2, arg3);
6087 break;
6088 #endif
6089 #ifdef TARGET_NR_getsockopt
6090 case TARGET_NR_getsockopt:
6091 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6092 break;
6093 #endif
6094 #ifdef TARGET_NR_listen
6095 case TARGET_NR_listen:
6096 ret = get_errno(listen(arg1, arg2));
6097 break;
6098 #endif
6099 #ifdef TARGET_NR_recv
6100 case TARGET_NR_recv:
6101 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6102 break;
6103 #endif
6104 #ifdef TARGET_NR_recvfrom
6105 case TARGET_NR_recvfrom:
6106 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6107 break;
6108 #endif
6109 #ifdef TARGET_NR_recvmsg
6110 case TARGET_NR_recvmsg:
6111 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6112 break;
6113 #endif
6114 #ifdef TARGET_NR_send
6115 case TARGET_NR_send:
6116 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6117 break;
6118 #endif
6119 #ifdef TARGET_NR_sendmsg
6120 case TARGET_NR_sendmsg:
6121 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6122 break;
6123 #endif
6124 #ifdef TARGET_NR_sendto
6125 case TARGET_NR_sendto:
6126 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6127 break;
6128 #endif
6129 #ifdef TARGET_NR_shutdown
6130 case TARGET_NR_shutdown:
6131 ret = get_errno(shutdown(arg1, arg2));
6132 break;
6133 #endif
6134 #ifdef TARGET_NR_socket
6135 case TARGET_NR_socket:
6136 ret = do_socket(arg1, arg2, arg3);
6137 break;
6138 #endif
6139 #ifdef TARGET_NR_socketpair
6140 case TARGET_NR_socketpair:
6141 ret = do_socketpair(arg1, arg2, arg3, arg4);
6142 break;
6143 #endif
6144 #ifdef TARGET_NR_setsockopt
6145 case TARGET_NR_setsockopt:
6146 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6147 break;
6148 #endif
6149
6150 case TARGET_NR_syslog:
6151 if (!(p = lock_user_string(arg2)))
6152 goto efault;
6153 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6154 unlock_user(p, arg2, 0);
6155 break;
6156
6157 case TARGET_NR_setitimer:
6158 {
6159 struct itimerval value, ovalue, *pvalue;
6160
6161 if (arg2) {
6162 pvalue = &value;
6163 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6164 || copy_from_user_timeval(&pvalue->it_value,
6165 arg2 + sizeof(struct target_timeval)))
6166 goto efault;
6167 } else {
6168 pvalue = NULL;
6169 }
6170 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6171 if (!is_error(ret) && arg3) {
6172 if (copy_to_user_timeval(arg3,
6173 &ovalue.it_interval)
6174 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6175 &ovalue.it_value))
6176 goto efault;
6177 }
6178 }
6179 break;
6180 case TARGET_NR_getitimer:
6181 {
6182 struct itimerval value;
6183
6184 ret = get_errno(getitimer(arg1, &value));
6185 if (!is_error(ret) && arg2) {
6186 if (copy_to_user_timeval(arg2,
6187 &value.it_interval)
6188 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6189 &value.it_value))
6190 goto efault;
6191 }
6192 }
6193 break;
6194 case TARGET_NR_stat:
6195 if (!(p = lock_user_string(arg1)))
6196 goto efault;
6197 ret = get_errno(stat(path(p), &st));
6198 unlock_user(p, arg1, 0);
6199 goto do_stat;
6200 case TARGET_NR_lstat:
6201 if (!(p = lock_user_string(arg1)))
6202 goto efault;
6203 ret = get_errno(lstat(path(p), &st));
6204 unlock_user(p, arg1, 0);
6205 goto do_stat;
6206 case TARGET_NR_fstat:
6207 {
6208 ret = get_errno(fstat(arg1, &st));
6209 do_stat:
6210 if (!is_error(ret)) {
6211 struct target_stat *target_st;
6212
6213 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6214 goto efault;
6215 memset(target_st, 0, sizeof(*target_st));
6216 __put_user(st.st_dev, &target_st->st_dev);
6217 __put_user(st.st_ino, &target_st->st_ino);
6218 __put_user(st.st_mode, &target_st->st_mode);
6219 __put_user(st.st_uid, &target_st->st_uid);
6220 __put_user(st.st_gid, &target_st->st_gid);
6221 __put_user(st.st_nlink, &target_st->st_nlink);
6222 __put_user(st.st_rdev, &target_st->st_rdev);
6223 __put_user(st.st_size, &target_st->st_size);
6224 __put_user(st.st_blksize, &target_st->st_blksize);
6225 __put_user(st.st_blocks, &target_st->st_blocks);
6226 __put_user(st.st_atime, &target_st->target_st_atime);
6227 __put_user(st.st_mtime, &target_st->target_st_mtime);
6228 __put_user(st.st_ctime, &target_st->target_st_ctime);
6229 unlock_user_struct(target_st, arg2, 1);
6230 }
6231 }
6232 break;
6233 #ifdef TARGET_NR_olduname
6234 case TARGET_NR_olduname:
6235 goto unimplemented;
6236 #endif
6237 #ifdef TARGET_NR_iopl
6238 case TARGET_NR_iopl:
6239 goto unimplemented;
6240 #endif
6241 case TARGET_NR_vhangup:
6242 ret = get_errno(vhangup());
6243 break;
6244 #ifdef TARGET_NR_idle
6245 case TARGET_NR_idle:
6246 goto unimplemented;
6247 #endif
6248 #ifdef TARGET_NR_syscall
6249 case TARGET_NR_syscall:
6250 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6251 arg6, arg7, arg8, 0);
6252 break;
6253 #endif
6254 case TARGET_NR_wait4:
6255 {
6256 int status;
6257 abi_long status_ptr = arg2;
6258 struct rusage rusage, *rusage_ptr;
6259 abi_ulong target_rusage = arg4;
6260 if (target_rusage)
6261 rusage_ptr = &rusage;
6262 else
6263 rusage_ptr = NULL;
6264 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6265 if (!is_error(ret)) {
6266 if (status_ptr) {
6267 status = host_to_target_waitstatus(status);
6268 if (put_user_s32(status, status_ptr))
6269 goto efault;
6270 }
6271 if (target_rusage)
6272 host_to_target_rusage(target_rusage, &rusage);
6273 }
6274 }
6275 break;
6276 #ifdef TARGET_NR_swapoff
6277 case TARGET_NR_swapoff:
6278 if (!(p = lock_user_string(arg1)))
6279 goto efault;
6280 ret = get_errno(swapoff(p));
6281 unlock_user(p, arg1, 0);
6282 break;
6283 #endif
6284 case TARGET_NR_sysinfo:
6285 {
6286 struct target_sysinfo *target_value;
6287 struct sysinfo value;
6288 ret = get_errno(sysinfo(&value));
6289 if (!is_error(ret) && arg1)
6290 {
6291 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6292 goto efault;
6293 __put_user(value.uptime, &target_value->uptime);
6294 __put_user(value.loads[0], &target_value->loads[0]);
6295 __put_user(value.loads[1], &target_value->loads[1]);
6296 __put_user(value.loads[2], &target_value->loads[2]);
6297 __put_user(value.totalram, &target_value->totalram);
6298 __put_user(value.freeram, &target_value->freeram);
6299 __put_user(value.sharedram, &target_value->sharedram);
6300 __put_user(value.bufferram, &target_value->bufferram);
6301 __put_user(value.totalswap, &target_value->totalswap);
6302 __put_user(value.freeswap, &target_value->freeswap);
6303 __put_user(value.procs, &target_value->procs);
6304 __put_user(value.totalhigh, &target_value->totalhigh);
6305 __put_user(value.freehigh, &target_value->freehigh);
6306 __put_user(value.mem_unit, &target_value->mem_unit);
6307 unlock_user_struct(target_value, arg1, 1);
6308 }
6309 }
6310 break;
6311 #ifdef TARGET_NR_ipc
6312 case TARGET_NR_ipc:
6313 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6314 break;
6315 #endif
6316 #ifdef TARGET_NR_semget
6317 case TARGET_NR_semget:
6318 ret = get_errno(semget(arg1, arg2, arg3));
6319 break;
6320 #endif
6321 #ifdef TARGET_NR_semop
6322 case TARGET_NR_semop:
6323 ret = get_errno(do_semop(arg1, arg2, arg3));
6324 break;
6325 #endif
6326 #ifdef TARGET_NR_semctl
6327 case TARGET_NR_semctl:
6328 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6329 break;
6330 #endif
6331 #ifdef TARGET_NR_msgctl
6332 case TARGET_NR_msgctl:
6333 ret = do_msgctl(arg1, arg2, arg3);
6334 break;
6335 #endif
6336 #ifdef TARGET_NR_msgget
6337 case TARGET_NR_msgget:
6338 ret = get_errno(msgget(arg1, arg2));
6339 break;
6340 #endif
6341 #ifdef TARGET_NR_msgrcv
6342 case TARGET_NR_msgrcv:
6343 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6344 break;
6345 #endif
6346 #ifdef TARGET_NR_msgsnd
6347 case TARGET_NR_msgsnd:
6348 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6349 break;
6350 #endif
6351 #ifdef TARGET_NR_shmget
6352 case TARGET_NR_shmget:
6353 ret = get_errno(shmget(arg1, arg2, arg3));
6354 break;
6355 #endif
6356 #ifdef TARGET_NR_shmctl
6357 case TARGET_NR_shmctl:
6358 ret = do_shmctl(arg1, arg2, arg3);
6359 break;
6360 #endif
6361 #ifdef TARGET_NR_shmat
6362 case TARGET_NR_shmat:
6363 ret = do_shmat(arg1, arg2, arg3);
6364 break;
6365 #endif
6366 #ifdef TARGET_NR_shmdt
6367 case TARGET_NR_shmdt:
6368 ret = do_shmdt(arg1);
6369 break;
6370 #endif
6371 case TARGET_NR_fsync:
6372 ret = get_errno(fsync(arg1));
6373 break;
6374 case TARGET_NR_clone:
6375 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6376 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6377 #elif defined(TARGET_CRIS)
6378 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6379 #elif defined(TARGET_S390X)
6380 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6381 #else
6382 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6383 #endif
6384 break;
6385 #ifdef __NR_exit_group
6386 /* new thread calls */
6387 case TARGET_NR_exit_group:
6388 #ifdef TARGET_GPROF
6389 _mcleanup();
6390 #endif
6391 gdb_exit(cpu_env, arg1);
6392 ret = get_errno(exit_group(arg1));
6393 break;
6394 #endif
6395 case TARGET_NR_setdomainname:
6396 if (!(p = lock_user_string(arg1)))
6397 goto efault;
6398 ret = get_errno(setdomainname(p, arg2));
6399 unlock_user(p, arg1, 0);
6400 break;
6401 case TARGET_NR_uname:
6402 /* no need to transcode because we use the linux syscall */
6403 {
6404 struct new_utsname * buf;
6405
6406 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6407 goto efault;
6408 ret = get_errno(sys_uname(buf));
6409 if (!is_error(ret)) {
6410 /* Overwrite the native machine name with whatever is being
6411 emulated. */
6412 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6413 /* Allow the user to override the reported release. */
6414 if (qemu_uname_release && *qemu_uname_release)
6415 strcpy (buf->release, qemu_uname_release);
6416 }
6417 unlock_user_struct(buf, arg1, 1);
6418 }
6419 break;
6420 #ifdef TARGET_I386
6421 case TARGET_NR_modify_ldt:
6422 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6423 break;
6424 #if !defined(TARGET_X86_64)
6425 case TARGET_NR_vm86old:
6426 goto unimplemented;
6427 case TARGET_NR_vm86:
6428 ret = do_vm86(cpu_env, arg1, arg2);
6429 break;
6430 #endif
6431 #endif
6432 case TARGET_NR_adjtimex:
6433 goto unimplemented;
6434 #ifdef TARGET_NR_create_module
6435 case TARGET_NR_create_module:
6436 #endif
6437 case TARGET_NR_init_module:
6438 case TARGET_NR_delete_module:
6439 #ifdef TARGET_NR_get_kernel_syms
6440 case TARGET_NR_get_kernel_syms:
6441 #endif
6442 goto unimplemented;
6443 case TARGET_NR_quotactl:
6444 goto unimplemented;
6445 case TARGET_NR_getpgid:
6446 ret = get_errno(getpgid(arg1));
6447 break;
6448 case TARGET_NR_fchdir:
6449 ret = get_errno(fchdir(arg1));
6450 break;
6451 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6452 case TARGET_NR_bdflush:
6453 goto unimplemented;
6454 #endif
6455 #ifdef TARGET_NR_sysfs
6456 case TARGET_NR_sysfs:
6457 goto unimplemented;
6458 #endif
6459 case TARGET_NR_personality:
6460 ret = get_errno(personality(arg1));
6461 break;
6462 #ifdef TARGET_NR_afs_syscall
6463 case TARGET_NR_afs_syscall:
6464 goto unimplemented;
6465 #endif
6466 #ifdef TARGET_NR__llseek /* Not on alpha */
6467 case TARGET_NR__llseek:
6468 {
6469 int64_t res;
6470 #if !defined(__NR_llseek)
6471 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6472 if (res == -1) {
6473 ret = get_errno(res);
6474 } else {
6475 ret = 0;
6476 }
6477 #else
6478 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6479 #endif
6480 if ((ret == 0) && put_user_s64(res, arg4)) {
6481 goto efault;
6482 }
6483 }
6484 break;
6485 #endif
6486 case TARGET_NR_getdents:
6487 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6488 {
6489 struct target_dirent *target_dirp;
6490 struct linux_dirent *dirp;
6491 abi_long count = arg3;
6492
6493 dirp = malloc(count);
6494 if (!dirp) {
6495 ret = -TARGET_ENOMEM;
6496 goto fail;
6497 }
6498
6499 ret = get_errno(sys_getdents(arg1, dirp, count));
6500 if (!is_error(ret)) {
6501 struct linux_dirent *de;
6502 struct target_dirent *tde;
6503 int len = ret;
6504 int reclen, treclen;
6505 int count1, tnamelen;
6506
6507 count1 = 0;
6508 de = dirp;
6509 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6510 goto efault;
6511 tde = target_dirp;
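/*
 * Convert record by record: the host struct linux_dirent stores d_ino
 * and d_off as host longs while the target uses abi_long, so with a
 * 64-bit host and a 32-bit target each record shrinks by
 * 2 * (sizeof(long) - sizeof(abi_long)) bytes. The sum of the converted
 * record lengths is what the guest sees as the return value.
 */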
6512 while (len > 0) {
6513 reclen = de->d_reclen;
6514 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6515 tde->d_reclen = tswap16(treclen);
6516 tde->d_ino = tswapl(de->d_ino);
6517 tde->d_off = tswapl(de->d_off);
6518 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6519 if (tnamelen > 256)
6520 tnamelen = 256;
6521 /* XXX: may not be correct */
6522 pstrcpy(tde->d_name, tnamelen, de->d_name);
6523 de = (struct linux_dirent *)((char *)de + reclen);
6524 len -= reclen;
6525 tde = (struct target_dirent *)((char *)tde + treclen);
6526 count1 += treclen;
6527 }
6528 ret = count1;
6529 unlock_user(target_dirp, arg2, ret);
6530 }
6531 free(dirp);
6532 }
6533 #else
6534 {
6535 struct linux_dirent *dirp;
6536 abi_long count = arg3;
6537
6538 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6539 goto efault;
6540 ret = get_errno(sys_getdents(arg1, dirp, count));
6541 if (!is_error(ret)) {
6542 struct linux_dirent *de;
6543 int len = ret;
6544 int reclen;
6545 de = dirp;
6546 while (len > 0) {
6547 reclen = de->d_reclen;
6548 if (reclen > len)
6549 break;
6550 de->d_reclen = tswap16(reclen);
6551 tswapls(&de->d_ino);
6552 tswapls(&de->d_off);
6553 de = (struct linux_dirent *)((char *)de + reclen);
6554 len -= reclen;
6555 }
6556 }
6557 unlock_user(dirp, arg2, ret);
6558 }
6559 #endif
6560 break;
6561 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6562 case TARGET_NR_getdents64:
6563 {
6564 struct linux_dirent64 *dirp;
6565 abi_long count = arg3;
6566 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6567 goto efault;
6568 ret = get_errno(sys_getdents64(arg1, dirp, count));
6569 if (!is_error(ret)) {
6570 struct linux_dirent64 *de;
6571 int len = ret;
6572 int reclen;
6573 de = dirp;
6574 while (len > 0) {
6575 reclen = de->d_reclen;
6576 if (reclen > len)
6577 break;
6578 de->d_reclen = tswap16(reclen);
6579 tswap64s((uint64_t *)&de->d_ino);
6580 tswap64s((uint64_t *)&de->d_off);
6581 de = (struct linux_dirent64 *)((char *)de + reclen);
6582 len -= reclen;
6583 }
6584 }
6585 unlock_user(dirp, arg2, ret);
6586 }
6587 break;
6588 #endif /* TARGET_NR_getdents64 */
6589 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6590 #ifdef TARGET_S390X
6591 case TARGET_NR_select:
6592 #else
6593 case TARGET_NR__newselect:
6594 #endif
6595 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6596 break;
6597 #endif
6598 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6599 # ifdef TARGET_NR_poll
6600 case TARGET_NR_poll:
6601 # endif
6602 # ifdef TARGET_NR_ppoll
6603 case TARGET_NR_ppoll:
6604 # endif
6605 {
6606 struct target_pollfd *target_pfd;
6607 unsigned int nfds = arg2;
6608 int timeout = arg3;
6609 struct pollfd *pfd;
6610 unsigned int i;
6611
6612 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6613 if (!target_pfd)
6614 goto efault;
6615
6616 pfd = alloca(sizeof(struct pollfd) * nfds);
6617 for(i = 0; i < nfds; i++) {
6618 pfd[i].fd = tswap32(target_pfd[i].fd);
6619 pfd[i].events = tswap16(target_pfd[i].events);
6620 }
6621
6622 # ifdef TARGET_NR_ppoll
6623 if (num == TARGET_NR_ppoll) {
6624 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6625 target_sigset_t *target_set;
6626 sigset_t _set, *set = &_set;
6627
6628 if (arg3) {
6629 if (target_to_host_timespec(timeout_ts, arg3)) {
6630 unlock_user(target_pfd, arg1, 0);
6631 goto efault;
6632 }
6633 } else {
6634 timeout_ts = NULL;
6635 }
6636
6637 if (arg4) {
6638 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6639 if (!target_set) {
6640 unlock_user(target_pfd, arg1, 0);
6641 goto efault;
6642 }
6643 target_to_host_sigset(set, target_set);
6644 } else {
6645 set = NULL;
6646 }
6647
6648 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6649
6650 if (!is_error(ret) && arg3) {
6651 host_to_target_timespec(arg3, timeout_ts);
6652 }
6653 if (arg4) {
6654 unlock_user(target_set, arg4, 0);
6655 }
6656 } else
6657 # endif
6658 ret = get_errno(poll(pfd, nfds, timeout));
6659
6660 if (!is_error(ret)) {
6661 for(i = 0; i < nfds; i++) {
6662 target_pfd[i].revents = tswap16(pfd[i].revents);
6663 }
6664 }
6665 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6666 }
6667 break;
6668 #endif
6669 case TARGET_NR_flock:
6670 /* NOTE: the flock constant seems to be the same for every
6671 Linux platform */
6672 ret = get_errno(flock(arg1, arg2));
6673 break;
6674 case TARGET_NR_readv:
6675 {
6676 int count = arg3;
6677 struct iovec *vec;
6678
6679 vec = alloca(count * sizeof(struct iovec));
6680 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6681 goto efault;
6682 ret = get_errno(readv(arg1, vec, count));
6683 unlock_iovec(vec, arg2, count, 1);
6684 }
6685 break;
6686 case TARGET_NR_writev:
6687 {
6688 int count = arg3;
6689 struct iovec *vec;
6690
6691 vec = alloca(count * sizeof(struct iovec));
6692 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6693 goto efault;
6694 ret = get_errno(writev(arg1, vec, count));
6695 unlock_iovec(vec, arg2, count, 0);
6696 }
6697 break;
6698 case TARGET_NR_getsid:
6699 ret = get_errno(getsid(arg1));
6700 break;
6701 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6702 case TARGET_NR_fdatasync:
6703 ret = get_errno(fdatasync(arg1));
6704 break;
6705 #endif
6706 case TARGET_NR__sysctl:
6707 /* We don't implement this, but ENOTDIR is always a safe
6708 return value. */
6709 ret = -TARGET_ENOTDIR;
6710 break;
6711 case TARGET_NR_sched_getaffinity:
6712 {
6713 unsigned int mask_size;
6714 unsigned long *mask;
6715
6716 /*
6717 * sched_getaffinity needs multiples of ulong, so we need to take
6718 * care of mismatches between the target ulong and host ulong sizes.
6719 */
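/*
 * The guest length must already be a whole number of target ulongs
 * (otherwise EINVAL, much as the kernel itself reports), and the host
 * buffer is rounded up to a whole number of host ulongs. The raw
 * syscall returns the number of bytes it copied into the mask, which
 * is what gets copied back to the guest below.
 */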
6720 if (arg2 & (sizeof(abi_ulong) - 1)) {
6721 ret = -TARGET_EINVAL;
6722 break;
6723 }
6724 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6725
6726 mask = alloca(mask_size);
6727 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6728
6729 if (!is_error(ret)) {
6730 if (copy_to_user(arg3, mask, ret)) {
6731 goto efault;
6732 }
6733 }
6734 }
6735 break;
6736 case TARGET_NR_sched_setaffinity:
6737 {
6738 unsigned int mask_size;
6739 unsigned long *mask;
6740
6741 /*
6742 * sched_setaffinity needs multiples of ulong, so we need to take
6743 * care of mismatches between the target ulong and host ulong sizes.
6744 */
6745 if (arg2 & (sizeof(abi_ulong) - 1)) {
6746 ret = -TARGET_EINVAL;
6747 break;
6748 }
6749 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6750
6751 mask = alloca(mask_size);
6752 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6753 goto efault;
6754 }
6755 memcpy(mask, p, arg2);
6756 unlock_user_struct(p, arg3, 0);
6757
6758 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6759 }
6760 break;
6761 case TARGET_NR_sched_setparam:
6762 {
6763 struct sched_param *target_schp;
6764 struct sched_param schp;
6765
6766 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6767 goto efault;
6768 schp.sched_priority = tswap32(target_schp->sched_priority);
6769 unlock_user_struct(target_schp, arg2, 0);
6770 ret = get_errno(sched_setparam(arg1, &schp));
6771 }
6772 break;
6773 case TARGET_NR_sched_getparam:
6774 {
6775 struct sched_param *target_schp;
6776 struct sched_param schp;
6777 ret = get_errno(sched_getparam(arg1, &schp));
6778 if (!is_error(ret)) {
6779 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6780 goto efault;
6781 target_schp->sched_priority = tswap32(schp.sched_priority);
6782 unlock_user_struct(target_schp, arg2, 1);
6783 }
6784 }
6785 break;
6786 case TARGET_NR_sched_setscheduler:
6787 {
6788 struct sched_param *target_schp;
6789 struct sched_param schp;
6790 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6791 goto efault;
6792 schp.sched_priority = tswap32(target_schp->sched_priority);
6793 unlock_user_struct(target_schp, arg3, 0);
6794 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6795 }
6796 break;
6797 case TARGET_NR_sched_getscheduler:
6798 ret = get_errno(sched_getscheduler(arg1));
6799 break;
6800 case TARGET_NR_sched_yield:
6801 ret = get_errno(sched_yield());
6802 break;
6803 case TARGET_NR_sched_get_priority_max:
6804 ret = get_errno(sched_get_priority_max(arg1));
6805 break;
6806 case TARGET_NR_sched_get_priority_min:
6807 ret = get_errno(sched_get_priority_min(arg1));
6808 break;
6809 case TARGET_NR_sched_rr_get_interval:
6810 {
6811 struct timespec ts;
6812 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6813 if (!is_error(ret)) {
6814 host_to_target_timespec(arg2, &ts);
6815 }
6816 }
6817 break;
6818 case TARGET_NR_nanosleep:
6819 {
6820 struct timespec req, rem;
6821 target_to_host_timespec(&req, arg1);
6822 ret = get_errno(nanosleep(&req, &rem));
6823 if (is_error(ret) && arg2) {
6824 host_to_target_timespec(arg2, &rem);
6825 }
6826 }
6827 break;
6828 #ifdef TARGET_NR_query_module
6829 case TARGET_NR_query_module:
6830 goto unimplemented;
6831 #endif
6832 #ifdef TARGET_NR_nfsservctl
6833 case TARGET_NR_nfsservctl:
6834 goto unimplemented;
6835 #endif
6836 case TARGET_NR_prctl:
6837 switch (arg1)
6838 {
6839 case PR_GET_PDEATHSIG:
6840 {
6841 int deathsig;
6842 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6843 if (!is_error(ret) && arg2
6844 && put_user_ual(deathsig, arg2))
6845 goto efault;
6846 }
6847 break;
6848 default:
6849 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6850 break;
6851 }
6852 break;
6853 #ifdef TARGET_NR_arch_prctl
6854 case TARGET_NR_arch_prctl:
6855 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6856 ret = do_arch_prctl(cpu_env, arg1, arg2);
6857 break;
6858 #else
6859 goto unimplemented;
6860 #endif
6861 #endif
6862 #ifdef TARGET_NR_pread
6863 case TARGET_NR_pread:
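/*
 * On ABIs that pass 64-bit values in aligned register pairs a padding
 * register precedes the offset, so regpairs_aligned() shifts the real
 * offset argument along by one for both pread and pwrite here.
 */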
6864 if (regpairs_aligned(cpu_env))
6865 arg4 = arg5;
6866 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6867 goto efault;
6868 ret = get_errno(pread(arg1, p, arg3, arg4));
6869 unlock_user(p, arg2, ret);
6870 break;
6871 case TARGET_NR_pwrite:
6872 if (regpairs_aligned(cpu_env))
6873 arg4 = arg5;
6874 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6875 goto efault;
6876 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6877 unlock_user(p, arg2, 0);
6878 break;
6879 #endif
6880 #ifdef TARGET_NR_pread64
6881 case TARGET_NR_pread64:
6882 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6883 goto efault;
6884 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6885 unlock_user(p, arg2, ret);
6886 break;
6887 case TARGET_NR_pwrite64:
6888 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6889 goto efault;
6890 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6891 unlock_user(p, arg2, 0);
6892 break;
6893 #endif
6894 case TARGET_NR_getcwd:
6895 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6896 goto efault;
6897 ret = get_errno(sys_getcwd1(p, arg2));
6898 unlock_user(p, arg1, ret);
6899 break;
6900 case TARGET_NR_capget:
6901 goto unimplemented;
6902 case TARGET_NR_capset:
6903 goto unimplemented;
6904 case TARGET_NR_sigaltstack:
6905 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6906 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6907 defined(TARGET_M68K) || defined(TARGET_S390X)
6908 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6909 break;
6910 #else
6911 goto unimplemented;
6912 #endif
6913 case TARGET_NR_sendfile:
6914 goto unimplemented;
6915 #ifdef TARGET_NR_getpmsg
6916 case TARGET_NR_getpmsg:
6917 goto unimplemented;
6918 #endif
6919 #ifdef TARGET_NR_putpmsg
6920 case TARGET_NR_putpmsg:
6921 goto unimplemented;
6922 #endif
6923 #ifdef TARGET_NR_vfork
6924 case TARGET_NR_vfork:
6925 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6926 0, 0, 0, 0));
6927 break;
6928 #endif
6929 #ifdef TARGET_NR_ugetrlimit
6930 case TARGET_NR_ugetrlimit:
6931 {
6932 struct rlimit rlim;
6933 int resource = target_to_host_resource(arg1);
6934 ret = get_errno(getrlimit(resource, &rlim));
6935 if (!is_error(ret)) {
6936 struct target_rlimit *target_rlim;
6937 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6938 goto efault;
6939 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6940 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6941 unlock_user_struct(target_rlim, arg2, 1);
6942 }
6943 break;
6944 }
6945 #endif
6946 #ifdef TARGET_NR_truncate64
6947 case TARGET_NR_truncate64:
6948 if (!(p = lock_user_string(arg1)))
6949 goto efault;
6950 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6951 unlock_user(p, arg1, 0);
6952 break;
6953 #endif
6954 #ifdef TARGET_NR_ftruncate64
6955 case TARGET_NR_ftruncate64:
6956 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6957 break;
6958 #endif
6959 #ifdef TARGET_NR_stat64
6960 case TARGET_NR_stat64:
6961 if (!(p = lock_user_string(arg1)))
6962 goto efault;
6963 ret = get_errno(stat(path(p), &st));
6964 unlock_user(p, arg1, 0);
6965 if (!is_error(ret))
6966 ret = host_to_target_stat64(cpu_env, arg2, &st);
6967 break;
6968 #endif
6969 #ifdef TARGET_NR_lstat64
6970 case TARGET_NR_lstat64:
6971 if (!(p = lock_user_string(arg1)))
6972 goto efault;
6973 ret = get_errno(lstat(path(p), &st));
6974 unlock_user(p, arg1, 0);
6975 if (!is_error(ret))
6976 ret = host_to_target_stat64(cpu_env, arg2, &st);
6977 break;
6978 #endif
6979 #ifdef TARGET_NR_fstat64
6980 case TARGET_NR_fstat64:
6981 ret = get_errno(fstat(arg1, &st));
6982 if (!is_error(ret))
6983 ret = host_to_target_stat64(cpu_env, arg2, &st);
6984 break;
6985 #endif
6986 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6987 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6988 #ifdef TARGET_NR_fstatat64
6989 case TARGET_NR_fstatat64:
6990 #endif
6991 #ifdef TARGET_NR_newfstatat
6992 case TARGET_NR_newfstatat:
6993 #endif
6994 if (!(p = lock_user_string(arg2)))
6995 goto efault;
6996 #ifdef __NR_fstatat64
6997 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6998 #else
6999 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7000 #endif
7001 if (!is_error(ret))
7002 ret = host_to_target_stat64(cpu_env, arg3, &st);
7003 break;
7004 #endif
7005 case TARGET_NR_lchown:
7006 if (!(p = lock_user_string(arg1)))
7007 goto efault;
7008 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7009 unlock_user(p, arg1, 0);
7010 break;
7011 #ifdef TARGET_NR_getuid
7012 case TARGET_NR_getuid:
7013 ret = get_errno(high2lowuid(getuid()));
7014 break;
7015 #endif
7016 #ifdef TARGET_NR_getgid
7017 case TARGET_NR_getgid:
7018 ret = get_errno(high2lowgid(getgid()));
7019 break;
7020 #endif
7021 #ifdef TARGET_NR_geteuid
7022 case TARGET_NR_geteuid:
7023 ret = get_errno(high2lowuid(geteuid()));
7024 break;
7025 #endif
7026 #ifdef TARGET_NR_getegid
7027 case TARGET_NR_getegid:
7028 ret = get_errno(high2lowgid(getegid()));
7029 break;
7030 #endif
7031 case TARGET_NR_setreuid:
7032 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7033 break;
7034 case TARGET_NR_setregid:
7035 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7036 break;
7037 case TARGET_NR_getgroups:
7038 {
7039 int gidsetsize = arg1;
7040 target_id *target_grouplist;
7041 gid_t *grouplist;
7042 int i;
7043
7044 grouplist = alloca(gidsetsize * sizeof(gid_t));
7045 ret = get_errno(getgroups(gidsetsize, grouplist));
7046 if (gidsetsize == 0)
7047 break;
7048 if (!is_error(ret)) {
7049 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7050 if (!target_grouplist)
7051 goto efault;
7052 for(i = 0;i < ret; i++)
7053 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7054 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7055 }
7056 }
7057 break;
7058 case TARGET_NR_setgroups:
7059 {
7060 int gidsetsize = arg1;
7061 target_id *target_grouplist;
7062 gid_t *grouplist;
7063 int i;
7064
7065 grouplist = alloca(gidsetsize * sizeof(gid_t));
7066 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7067 if (!target_grouplist) {
7068 ret = -TARGET_EFAULT;
7069 goto fail;
7070 }
7071 for(i = 0;i < gidsetsize; i++)
7072 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7073 unlock_user(target_grouplist, arg2, 0);
7074 ret = get_errno(setgroups(gidsetsize, grouplist));
7075 }
7076 break;
7077 case TARGET_NR_fchown:
7078 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7079 break;
7080 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7081 case TARGET_NR_fchownat:
7082 if (!(p = lock_user_string(arg2)))
7083 goto efault;
7084 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7085 unlock_user(p, arg2, 0);
7086 break;
7087 #endif
7088 #ifdef TARGET_NR_setresuid
7089 case TARGET_NR_setresuid:
7090 ret = get_errno(setresuid(low2highuid(arg1),
7091 low2highuid(arg2),
7092 low2highuid(arg3)));
7093 break;
7094 #endif
7095 #ifdef TARGET_NR_getresuid
7096 case TARGET_NR_getresuid:
7097 {
7098 uid_t ruid, euid, suid;
7099 ret = get_errno(getresuid(&ruid, &euid, &suid));
7100 if (!is_error(ret)) {
7101 if (put_user_u16(high2lowuid(ruid), arg1)
7102 || put_user_u16(high2lowuid(euid), arg2)
7103 || put_user_u16(high2lowuid(suid), arg3))
7104 goto efault;
7105 }
7106 }
7107 break;
7108 #endif
7109 #ifdef TARGET_NR_getresgid
7110 case TARGET_NR_setresgid:
7111 ret = get_errno(setresgid(low2highgid(arg1),
7112 low2highgid(arg2),
7113 low2highgid(arg3)));
7114 break;
7115 #endif
7116 #ifdef TARGET_NR_getresgid
7117 case TARGET_NR_getresgid:
7118 {
7119 gid_t rgid, egid, sgid;
7120 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7121 if (!is_error(ret)) {
7122 if (put_user_u16(high2lowgid(rgid), arg1)
7123 || put_user_u16(high2lowgid(egid), arg2)
7124 || put_user_u16(high2lowgid(sgid), arg3))
7125 goto efault;
7126 }
7127 }
7128 break;
7129 #endif
7130 case TARGET_NR_chown:
7131 if (!(p = lock_user_string(arg1)))
7132 goto efault;
7133 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7134 unlock_user(p, arg1, 0);
7135 break;
7136 case TARGET_NR_setuid:
7137 ret = get_errno(setuid(low2highuid(arg1)));
7138 break;
7139 case TARGET_NR_setgid:
7140 ret = get_errno(setgid(low2highgid(arg1)));
7141 break;
7142 case TARGET_NR_setfsuid:
7143 ret = get_errno(setfsuid(arg1));
7144 break;
7145 case TARGET_NR_setfsgid:
7146 ret = get_errno(setfsgid(arg1));
7147 break;
7148
7149 #ifdef TARGET_NR_lchown32
7150 case TARGET_NR_lchown32:
7151 if (!(p = lock_user_string(arg1)))
7152 goto efault;
7153 ret = get_errno(lchown(p, arg2, arg3));
7154 unlock_user(p, arg1, 0);
7155 break;
7156 #endif
7157 #ifdef TARGET_NR_getuid32
7158 case TARGET_NR_getuid32:
7159 ret = get_errno(getuid());
7160 break;
7161 #endif
7162
7163 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7164 /* Alpha specific */
7165 case TARGET_NR_getxuid:
7166 {
7167 uid_t euid;
7168 euid = geteuid();
7169 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
7170 }
7171 ret = get_errno(getuid());
7172 break;
7173 #endif
7174 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7175 /* Alpha specific */
7176 case TARGET_NR_getxgid:
7177 {
7178 gid_t egid;
7179 egid = getegid();
7180 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
7181 }
7182 ret = get_errno(getgid());
7183 break;
7184 #endif
7185 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7186 /* Alpha specific */
7187 case TARGET_NR_osf_getsysinfo:
7188 ret = -TARGET_EOPNOTSUPP;
7189 switch (arg1) {
7190 case TARGET_GSI_IEEE_FP_CONTROL:
7191 {
7192 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7193
7194 /* Copied from linux ieee_fpcr_to_swcr. */
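/*
 * The hardware FPCR holds trap *disable* bits while the software
 * completion control word uses trap *enable* bits, hence the enable
 * bits below being derived from the inverted FPCR.
 */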
7195 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7196 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7197 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7198 | SWCR_TRAP_ENABLE_DZE
7199 | SWCR_TRAP_ENABLE_OVF);
7200 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7201 | SWCR_TRAP_ENABLE_INE);
7202 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7203 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7204
7205 if (put_user_u64 (swcr, arg2))
7206 goto efault;
7207 ret = 0;
7208 }
7209 break;
7210
7211 /* case GSI_IEEE_STATE_AT_SIGNAL:
7212 -- Not implemented in linux kernel.
7213 case GSI_UACPROC:
7214 -- Retrieves current unaligned access state; not much used.
7215 case GSI_PROC_TYPE:
7216 -- Retrieves implver information; surely not used.
7217 case GSI_GET_HWRPB:
7218 -- Grabs a copy of the HWRPB; surely not used.
7219 */
7220 }
7221 break;
7222 #endif
7223 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7224 /* Alpha specific */
7225 case TARGET_NR_osf_setsysinfo:
7226 ret = -TARGET_EOPNOTSUPP;
7227 switch (arg1) {
7228 case TARGET_SSI_IEEE_FP_CONTROL:
7229 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7230 {
7231 uint64_t swcr, fpcr, orig_fpcr;
7232
7233 if (get_user_u64 (swcr, arg2))
7234 goto efault;
7235 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7236 fpcr = orig_fpcr & FPCR_DYN_MASK;
7237
7238 /* Copied from linux ieee_swcr_to_fpcr. */
7239 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7240 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7241 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7242 | SWCR_TRAP_ENABLE_DZE
7243 | SWCR_TRAP_ENABLE_OVF)) << 48;
7244 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7245 | SWCR_TRAP_ENABLE_INE)) << 57;
7246 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7247 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7248
7249 cpu_alpha_store_fpcr (cpu_env, fpcr);
7250 ret = 0;
7251
7252 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7253 /* Old exceptions are not signaled. */
7254 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7255
7256 /* If any exceptions were set by this call and are unmasked,
7257 send a signal. */
7258 /* ??? FIXME */
7259 }
7260 }
7261 break;
7262
7263 /* case SSI_NVPAIRS:
7264 -- Used with SSIN_UACPROC to enable unaligned accesses.
7265 case SSI_IEEE_STATE_AT_SIGNAL:
7266 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7267 -- Not implemented in linux kernel
7268 */
7269 }
7270 break;
7271 #endif
7272 #ifdef TARGET_NR_osf_sigprocmask
7273 /* Alpha specific. */
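/*
 * This legacy OSF/1-style call returns the previous signal mask in the
 * syscall return value rather than through a user pointer, so the old
 * host mask is converted back and handed to the guest as ret.
 */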
7274 case TARGET_NR_osf_sigprocmask:
7275 {
7276 abi_ulong mask;
7277 int how;
7278 sigset_t set, oldset;
7279
7280 switch(arg1) {
7281 case TARGET_SIG_BLOCK:
7282 how = SIG_BLOCK;
7283 break;
7284 case TARGET_SIG_UNBLOCK:
7285 how = SIG_UNBLOCK;
7286 break;
7287 case TARGET_SIG_SETMASK:
7288 how = SIG_SETMASK;
7289 break;
7290 default:
7291 ret = -TARGET_EINVAL;
7292 goto fail;
7293 }
7294 mask = arg2;
7295 target_to_host_old_sigset(&set, &mask);
7296 sigprocmask(how, &set, &oldset);
7297 host_to_target_old_sigset(&mask, &oldset);
7298 ret = mask;
7299 }
7300 break;
7301 #endif
7302
7303 #ifdef TARGET_NR_getgid32
7304 case TARGET_NR_getgid32:
7305 ret = get_errno(getgid());
7306 break;
7307 #endif
7308 #ifdef TARGET_NR_geteuid32
7309 case TARGET_NR_geteuid32:
7310 ret = get_errno(geteuid());
7311 break;
7312 #endif
7313 #ifdef TARGET_NR_getegid32
7314 case TARGET_NR_getegid32:
7315 ret = get_errno(getegid());
7316 break;
7317 #endif
7318 #ifdef TARGET_NR_setreuid32
7319 case TARGET_NR_setreuid32:
7320 ret = get_errno(setreuid(arg1, arg2));
7321 break;
7322 #endif
7323 #ifdef TARGET_NR_setregid32
7324 case TARGET_NR_setregid32:
7325 ret = get_errno(setregid(arg1, arg2));
7326 break;
7327 #endif
7328 #ifdef TARGET_NR_getgroups32
7329 case TARGET_NR_getgroups32:
7330 {
7331 int gidsetsize = arg1;
7332 uint32_t *target_grouplist;
7333 gid_t *grouplist;
7334 int i;
7335
7336 grouplist = alloca(gidsetsize * sizeof(gid_t));
7337 ret = get_errno(getgroups(gidsetsize, grouplist));
7338 if (gidsetsize == 0)
7339 break;
7340 if (!is_error(ret)) {
7341 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7342 if (!target_grouplist) {
7343 ret = -TARGET_EFAULT;
7344 goto fail;
7345 }
7346 for(i = 0;i < ret; i++)
7347 target_grouplist[i] = tswap32(grouplist[i]);
7348 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7349 }
7350 }
7351 break;
7352 #endif
7353 #ifdef TARGET_NR_setgroups32
7354 case TARGET_NR_setgroups32:
7355 {
7356 int gidsetsize = arg1;
7357 uint32_t *target_grouplist;
7358 gid_t *grouplist;
7359 int i;
7360
7361 grouplist = alloca(gidsetsize * sizeof(gid_t));
7362 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7363 if (!target_grouplist) {
7364 ret = -TARGET_EFAULT;
7365 goto fail;
7366 }
7367 for(i = 0;i < gidsetsize; i++)
7368 grouplist[i] = tswap32(target_grouplist[i]);
7369 unlock_user(target_grouplist, arg2, 0);
7370 ret = get_errno(setgroups(gidsetsize, grouplist));
7371 }
7372 break;
7373 #endif
7374 #ifdef TARGET_NR_fchown32
7375 case TARGET_NR_fchown32:
7376 ret = get_errno(fchown(arg1, arg2, arg3));
7377 break;
7378 #endif
7379 #ifdef TARGET_NR_setresuid32
7380 case TARGET_NR_setresuid32:
7381 ret = get_errno(setresuid(arg1, arg2, arg3));
7382 break;
7383 #endif
7384 #ifdef TARGET_NR_getresuid32
7385 case TARGET_NR_getresuid32:
7386 {
7387 uid_t ruid, euid, suid;
7388 ret = get_errno(getresuid(&ruid, &euid, &suid));
7389 if (!is_error(ret)) {
7390 if (put_user_u32(ruid, arg1)
7391 || put_user_u32(euid, arg2)
7392 || put_user_u32(suid, arg3))
7393 goto efault;
7394 }
7395 }
7396 break;
7397 #endif
7398 #ifdef TARGET_NR_setresgid32
7399 case TARGET_NR_setresgid32:
7400 ret = get_errno(setresgid(arg1, arg2, arg3));
7401 break;
7402 #endif
7403 #ifdef TARGET_NR_getresgid32
7404 case TARGET_NR_getresgid32:
7405 {
7406 gid_t rgid, egid, sgid;
7407 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7408 if (!is_error(ret)) {
7409 if (put_user_u32(rgid, arg1)
7410 || put_user_u32(egid, arg2)
7411 || put_user_u32(sgid, arg3))
7412 goto efault;
7413 }
7414 }
7415 break;
7416 #endif
7417 #ifdef TARGET_NR_chown32
7418 case TARGET_NR_chown32:
7419 if (!(p = lock_user_string(arg1)))
7420 goto efault;
7421 ret = get_errno(chown(p, arg2, arg3));
7422 unlock_user(p, arg1, 0);
7423 break;
7424 #endif
7425 #ifdef TARGET_NR_setuid32
7426 case TARGET_NR_setuid32:
7427 ret = get_errno(setuid(arg1));
7428 break;
7429 #endif
7430 #ifdef TARGET_NR_setgid32
7431 case TARGET_NR_setgid32:
7432 ret = get_errno(setgid(arg1));
7433 break;
7434 #endif
7435 #ifdef TARGET_NR_setfsuid32
7436 case TARGET_NR_setfsuid32:
7437 ret = get_errno(setfsuid(arg1));
7438 break;
7439 #endif
7440 #ifdef TARGET_NR_setfsgid32
7441 case TARGET_NR_setfsgid32:
7442 ret = get_errno(setfsgid(arg1));
7443 break;
7444 #endif
7445
7446 case TARGET_NR_pivot_root:
7447 goto unimplemented;
7448 #ifdef TARGET_NR_mincore
7449 case TARGET_NR_mincore:
7450 {
7451 void *a;
7452 ret = -TARGET_EFAULT;
7453 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7454 goto efault;
7455 if (!(p = lock_user_string(arg3)))
7456 goto mincore_fail;
7457 ret = get_errno(mincore(a, arg2, p));
7458 unlock_user(p, arg3, ret);
7459 mincore_fail:
7460 unlock_user(a, arg1, 0);
7461 }
7462 break;
7463 #endif
7464 #ifdef TARGET_NR_arm_fadvise64_64
7465 case TARGET_NR_arm_fadvise64_64:
7466 {
7467 /*
7468 * arm_fadvise64_64 looks like fadvise64_64 but
7469 * with a different argument order
7470 */
7471 abi_long temp;
7472 temp = arg3;
7473 arg3 = arg4;
7474 arg4 = temp;
7475 }
7476 #endif
7477 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7478 #ifdef TARGET_NR_fadvise64_64
7479 case TARGET_NR_fadvise64_64:
7480 #endif
7481 #ifdef TARGET_NR_fadvise64
7482 case TARGET_NR_fadvise64:
7483 #endif
7484 #ifdef TARGET_S390X
7485 switch (arg4) {
7486 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7487 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7488 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7489 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7490 default: break;
7491 }
7492 #endif
7493 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7494 break;
7495 #endif
7496 #ifdef TARGET_NR_madvise
7497 case TARGET_NR_madvise:
7498 /* A straight passthrough may not be safe because qemu sometimes
7499 turns private file-backed mappings into anonymous mappings.
7500 This will break MADV_DONTNEED.
7501 This is a hint, so ignoring and returning success is ok. */
7502 ret = get_errno(0);
7503 break;
7504 #endif
7505 #if TARGET_ABI_BITS == 32
7506 case TARGET_NR_fcntl64:
7507 {
7508 int cmd;
7509 struct flock64 fl;
7510 struct target_flock64 *target_fl;
7511 #ifdef TARGET_ARM
7512 struct target_eabi_flock64 *target_efl;
7513 #endif
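/*
 * The ARM EABI aligns 64-bit members to 8 bytes, so the guest flock64
 * layout differs between EABI and the old ABI; that is why two separate
 * target structures are converted below depending on the CPU's eabi flag.
 */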
7514
7515 cmd = target_to_host_fcntl_cmd(arg2);
7516 if (cmd == -TARGET_EINVAL)
7517 return cmd;
7518
7519 switch(arg2) {
7520 case TARGET_F_GETLK64:
7521 #ifdef TARGET_ARM
7522 if (((CPUARMState *)cpu_env)->eabi) {
7523 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7524 goto efault;
7525 fl.l_type = tswap16(target_efl->l_type);
7526 fl.l_whence = tswap16(target_efl->l_whence);
7527 fl.l_start = tswap64(target_efl->l_start);
7528 fl.l_len = tswap64(target_efl->l_len);
7529 fl.l_pid = tswap32(target_efl->l_pid);
7530 unlock_user_struct(target_efl, arg3, 0);
7531 } else
7532 #endif
7533 {
7534 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7535 goto efault;
7536 fl.l_type = tswap16(target_fl->l_type);
7537 fl.l_whence = tswap16(target_fl->l_whence);
7538 fl.l_start = tswap64(target_fl->l_start);
7539 fl.l_len = tswap64(target_fl->l_len);
7540 fl.l_pid = tswap32(target_fl->l_pid);
7541 unlock_user_struct(target_fl, arg3, 0);
7542 }
7543 ret = get_errno(fcntl(arg1, cmd, &fl));
7544 if (ret == 0) {
7545 #ifdef TARGET_ARM
7546 if (((CPUARMState *)cpu_env)->eabi) {
7547 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7548 goto efault;
7549 target_efl->l_type = tswap16(fl.l_type);
7550 target_efl->l_whence = tswap16(fl.l_whence);
7551 target_efl->l_start = tswap64(fl.l_start);
7552 target_efl->l_len = tswap64(fl.l_len);
7553 target_efl->l_pid = tswap32(fl.l_pid);
7554 unlock_user_struct(target_efl, arg3, 1);
7555 } else
7556 #endif
7557 {
7558 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7559 goto efault;
7560 target_fl->l_type = tswap16(fl.l_type);
7561 target_fl->l_whence = tswap16(fl.l_whence);
7562 target_fl->l_start = tswap64(fl.l_start);
7563 target_fl->l_len = tswap64(fl.l_len);
7564 target_fl->l_pid = tswap32(fl.l_pid);
7565 unlock_user_struct(target_fl, arg3, 1);
7566 }
7567 }
7568 break;
7569
7570 case TARGET_F_SETLK64:
7571 case TARGET_F_SETLKW64:
7572 #ifdef TARGET_ARM
7573 if (((CPUARMState *)cpu_env)->eabi) {
7574 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7575 goto efault;
7576 fl.l_type = tswap16(target_efl->l_type);
7577 fl.l_whence = tswap16(target_efl->l_whence);
7578 fl.l_start = tswap64(target_efl->l_start);
7579 fl.l_len = tswap64(target_efl->l_len);
7580 fl.l_pid = tswap32(target_efl->l_pid);
7581 unlock_user_struct(target_efl, arg3, 0);
7582 } else
7583 #endif
7584 {
7585 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7586 goto efault;
7587 fl.l_type = tswap16(target_fl->l_type);
7588 fl.l_whence = tswap16(target_fl->l_whence);
7589 fl.l_start = tswap64(target_fl->l_start);
7590 fl.l_len = tswap64(target_fl->l_len);
7591 fl.l_pid = tswap32(target_fl->l_pid);
7592 unlock_user_struct(target_fl, arg3, 0);
7593 }
7594 ret = get_errno(fcntl(arg1, cmd, &fl));
7595 break;
7596 default:
7597 ret = do_fcntl(arg1, arg2, arg3);
7598 break;
7599 }
7600 break;
7601 }
7602 #endif
7603 #ifdef TARGET_NR_cacheflush
7604 case TARGET_NR_cacheflush:
7605 /* self-modifying code is handled automatically, so nothing needed */
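/*
 * QEMU already invalidates affected translation blocks when guest code
 * pages are written, so there should be nothing left for an explicit
 * icache flush to do, which is why 0 is simply returned.
 */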
7606 ret = 0;
7607 break;
7608 #endif
7609 #ifdef TARGET_NR_security
7610 case TARGET_NR_security:
7611 goto unimplemented;
7612 #endif
7613 #ifdef TARGET_NR_getpagesize
7614 case TARGET_NR_getpagesize:
7615 ret = TARGET_PAGE_SIZE;
7616 break;
7617 #endif
7618 case TARGET_NR_gettid:
7619 ret = get_errno(gettid());
7620 break;
7621 #ifdef TARGET_NR_readahead
7622 case TARGET_NR_readahead:
7623 #if TARGET_ABI_BITS == 32
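/*
 * On 32-bit ABIs the 64-bit offset is split across two registers
 * (low word in arg2, high word in arg3); ABIs that require aligned
 * register pairs insert a padding register first, hence the shuffle
 * below before the two words are recombined.
 */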
7624 if (regpairs_aligned(cpu_env)) {
7625 arg2 = arg3;
7626 arg3 = arg4;
7627 arg4 = arg5;
7628 }
7629 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7630 #else
7631 ret = get_errno(readahead(arg1, arg2, arg3));
7632 #endif
7633 break;
7634 #endif
7635 #ifdef TARGET_NR_setxattr
7636 case TARGET_NR_setxattr:
7637 case TARGET_NR_lsetxattr:
7638 case TARGET_NR_fsetxattr:
7639 case TARGET_NR_getxattr:
7640 case TARGET_NR_lgetxattr:
7641 case TARGET_NR_fgetxattr:
7642 case TARGET_NR_listxattr:
7643 case TARGET_NR_llistxattr:
7644 case TARGET_NR_flistxattr:
7645 case TARGET_NR_removexattr:
7646 case TARGET_NR_lremovexattr:
7647 case TARGET_NR_fremovexattr:
7648 ret = -TARGET_EOPNOTSUPP;
7649 break;
7650 #endif
7651 #ifdef TARGET_NR_set_thread_area
7652 case TARGET_NR_set_thread_area:
7653 #if defined(TARGET_MIPS)
7654 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7655 ret = 0;
7656 break;
7657 #elif defined(TARGET_CRIS)
7658 if (arg1 & 0xff)
7659 ret = -TARGET_EINVAL;
7660 else {
7661 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7662 ret = 0;
7663 }
7664 break;
7665 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7666 ret = do_set_thread_area(cpu_env, arg1);
7667 break;
7668 #else
7669 goto unimplemented_nowarn;
7670 #endif
7671 #endif
7672 #ifdef TARGET_NR_get_thread_area
7673 case TARGET_NR_get_thread_area:
7674 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7675 ret = do_get_thread_area(cpu_env, arg1);
7676 #else
7677 goto unimplemented_nowarn;
7678 #endif
7679 #endif
7680 #ifdef TARGET_NR_getdomainname
7681 case TARGET_NR_getdomainname:
7682 goto unimplemented_nowarn;
7683 #endif
7684
7685 #ifdef TARGET_NR_clock_gettime
7686 case TARGET_NR_clock_gettime:
7687 {
7688 struct timespec ts;
7689 ret = get_errno(clock_gettime(arg1, &ts));
7690 if (!is_error(ret)) {
7691 host_to_target_timespec(arg2, &ts);
7692 }
7693 break;
7694 }
7695 #endif
7696 #ifdef TARGET_NR_clock_getres
7697 case TARGET_NR_clock_getres:
7698 {
7699 struct timespec ts;
7700 ret = get_errno(clock_getres(arg1, &ts));
7701 if (!is_error(ret)) {
7702 host_to_target_timespec(arg2, &ts);
7703 }
7704 break;
7705 }
7706 #endif
7707 #ifdef TARGET_NR_clock_nanosleep
7708 case TARGET_NR_clock_nanosleep:
7709 {
7710 struct timespec ts;
7711 target_to_host_timespec(&ts, arg3);
7712 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7713 if (arg4)
7714 host_to_target_timespec(arg4, &ts);
7715 break;
7716 }
7717 #endif
7718
7719 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7720 case TARGET_NR_set_tid_address:
7721 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7722 break;
7723 #endif
7724
7725 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7726 case TARGET_NR_tkill:
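/* Signal numbers differ between target and host, so they are translated
 * with target_to_host_signal() before the raw tkill/tgkill syscalls. */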
7727 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7728 break;
7729 #endif
7730
7731 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7732 case TARGET_NR_tgkill:
7733 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7734 target_to_host_signal(arg3)));
7735 break;
7736 #endif
7737
7738 #ifdef TARGET_NR_set_robust_list
7739 case TARGET_NR_set_robust_list:
7740 goto unimplemented_nowarn;
7741 #endif
7742
7743 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7744 case TARGET_NR_utimensat:
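/* arg3, if non-NULL, points at an array of two target timespecs
 * (atime, mtime) which are converted to host format; a NULL guest
 * pathname in arg2 is passed straight through to the host syscall. */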
7745 {
7746 struct timespec *tsp, ts[2];
7747 if (!arg3) {
7748 tsp = NULL;
7749 } else {
7750 target_to_host_timespec(ts, arg3);
7751 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
7752 tsp = ts;
7753 }
7754 if (!arg2)
7755 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7756 else {
7757 if (!(p = lock_user_string(arg2))) {
7758 ret = -TARGET_EFAULT;
7759 goto fail;
7760 }
7761 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7762 unlock_user(p, arg2, 0);
7763 }
7764 }
7765 break;
7766 #endif
7767 #if defined(CONFIG_USE_NPTL)
7768 case TARGET_NR_futex:
7769 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7770 break;
7771 #endif
7772 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7773 case TARGET_NR_inotify_init:
7774 ret = get_errno(sys_inotify_init());
7775 break;
7776 #endif
7777 #ifdef CONFIG_INOTIFY1
7778 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7779 case TARGET_NR_inotify_init1:
7780 ret = get_errno(sys_inotify_init1(arg1));
7781 break;
7782 #endif
7783 #endif
7784 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7785 case TARGET_NR_inotify_add_watch:
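/* Only the pathname needs translating here; the watch descriptor and
 * event mask use the same values on all targets. */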
7786 p = lock_user_string(arg2);
7787 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7788 unlock_user(p, arg2, 0);
7789 break;
7790 #endif
7791 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7792 case TARGET_NR_inotify_rm_watch:
7793 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7794 break;
7795 #endif
7796
7797 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7798 case TARGET_NR_mq_open:
7799 {
7800 struct mq_attr posix_mq_attr;
7801
7802 p = lock_user_string(arg1 - 1);
7803 if (arg4 != 0)
7804 copy_from_user_mq_attr(&posix_mq_attr, arg4);
7805 ret = get_errno(mq_open(p, arg2, arg3, arg4 ? &posix_mq_attr : NULL));
7806 unlock_user (p, arg1, 0);
7807 }
7808 break;
7809
7810 case TARGET_NR_mq_unlink:
7811 p = lock_user_string(arg1 - 1);
7812 ret = get_errno(mq_unlink(p));
7813 unlock_user (p, arg1, 0);
7814 break;
7815
7816 case TARGET_NR_mq_timedsend:
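/* If an absolute timeout was supplied (arg5) it is converted to a host
 * timespec and mq_timedsend() is used; otherwise this falls back to a
 * plain mq_send().  mq_timedreceive below follows the same pattern. */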
7817 {
7818 struct timespec ts;
7819
7820 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7821 if (arg5 != 0) {
7822 target_to_host_timespec(&ts, arg5);
7823 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7824 host_to_target_timespec(arg5, &ts);
7825 }
7826 else
7827 ret = get_errno(mq_send(arg1, p, arg3, arg4));
7828 unlock_user (p, arg2, arg3);
7829 }
7830 break;
7831
7832 case TARGET_NR_mq_timedreceive:
7833 {
7834 struct timespec ts;
7835 unsigned int prio;
7836
7837 p = lock_user (VERIFY_WRITE, arg2, arg3, 1);
7838 if (arg5 != 0) {
7839 target_to_host_timespec(&ts, arg5);
7840 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
7841 host_to_target_timespec(arg5, &ts);
7842 }
7843 else
7844 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
7845 unlock_user (p, arg2, arg3);
7846 if (arg4 != 0)
7847 put_user_u32(prio, arg4);
7848 }
7849 break;
7850
7851 /* Not implemented for now... */
7852 /* case TARGET_NR_mq_notify: */
7853 /* break; */
7854
7855 case TARGET_NR_mq_getsetattr:
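/* arg3 (old attributes, optional) is filled from mq_getattr(); arg2
 * (new attributes, optional) is converted and applied with mq_setattr().
 * Note that the raw library return values are combined here rather than
 * being run through get_errno(). */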
7856 {
7857 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
7858 ret = 0;
7859 if (arg3 != 0) {
7860 ret = mq_getattr(arg1, &posix_mq_attr_out);
7861 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
7862 }
7863 if (arg2 != 0) {
7864 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
7865 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
7866 }
7867
7868 }
7869 break;
7870 #endif
7871
7872 #ifdef CONFIG_SPLICE
7873 #ifdef TARGET_NR_tee
7874 case TARGET_NR_tee:
7875 {
7876 ret = get_errno(tee(arg1, arg2, arg3, arg4));
7877 }
7878 break;
7879 #endif
7880 #ifdef TARGET_NR_splice
7881 case TARGET_NR_splice:
7882 {
7883 loff_t loff_in, loff_out;
7884 loff_t *ploff_in = NULL, *ploff_out = NULL;
7885 if (arg2) {
7886 get_user_u64(loff_in, arg2);
7887 ploff_in = &loff_in;
7888 }
7889 if (arg4) {
7890 get_user_u64(loff_out, arg4);
7891 ploff_out = &loff_out;
7892 }
7893 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
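/* Note: the host splice() may update *ploff_in / *ploff_out, but as
 * written the new offsets are not copied back to guest memory. */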
7894 }
7895 break;
7896 #endif
7897 #ifdef TARGET_NR_vmsplice
7898 case TARGET_NR_vmsplice:
7899 {
7900 int count = arg3;
7901 struct iovec *vec;
7902
7903 vec = alloca(count * sizeof(struct iovec));
7904 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7905 goto efault;
7906 ret = get_errno(vmsplice(arg1, vec, count, arg4));
7907 unlock_iovec(vec, arg2, count, 0);
7908 }
7909 break;
7910 #endif
7911 #endif /* CONFIG_SPLICE */
7912 #ifdef CONFIG_EVENTFD
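/* The original eventfd syscall has no flags argument, so 0 is passed;
 * eventfd2 forwards the guest-supplied flags unchanged. */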
7913 #if defined(TARGET_NR_eventfd)
7914 case TARGET_NR_eventfd:
7915 ret = get_errno(eventfd(arg1, 0));
7916 break;
7917 #endif
7918 #if defined(TARGET_NR_eventfd2)
7919 case TARGET_NR_eventfd2:
7920 ret = get_errno(eventfd(arg1, arg2));
7921 break;
7922 #endif
7923 #endif /* CONFIG_EVENTFD */
7924 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7925 case TARGET_NR_fallocate:
7926 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
7927 break;
7928 #endif
7929 #if defined(CONFIG_SYNC_FILE_RANGE)
7930 #if defined(TARGET_NR_sync_file_range)
7931 case TARGET_NR_sync_file_range:
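/* On 32-bit ABIs the 64-bit offset and nbytes values each arrive as a
 * register pair.  MIPS additionally inserts a pad register after the fd
 * (presumably for register-pair alignment), so its pairs start at arg3
 * and the flags end up in arg7. */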
7932 #if TARGET_ABI_BITS == 32
7933 #if defined(TARGET_MIPS)
7934 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7935 target_offset64(arg5, arg6), arg7));
7936 #else
7937 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
7938 target_offset64(arg4, arg5), arg6));
7939 #endif /* !TARGET_MIPS */
7940 #else
7941 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
7942 #endif
7943 break;
7944 #endif
7945 #if defined(TARGET_NR_sync_file_range2)
7946 case TARGET_NR_sync_file_range2:
7947 /* This is like sync_file_range but the arguments are reordered */
7948 #if TARGET_ABI_BITS == 32
7949 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7950 target_offset64(arg5, arg6), arg2));
7951 #else
7952 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
7953 #endif
7954 break;
7955 #endif
7956 #endif
7957 #if defined(CONFIG_EPOLL)
7958 #if defined(TARGET_NR_epoll_create)
7959 case TARGET_NR_epoll_create:
7960 ret = get_errno(epoll_create(arg1));
7961 break;
7962 #endif
7963 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7964 case TARGET_NR_epoll_create1:
7965 ret = get_errno(epoll_create1(arg1));
7966 break;
7967 #endif
7968 #if defined(TARGET_NR_epoll_ctl)
7969 case TARGET_NR_epoll_ctl:
7970 {
7971 struct epoll_event ep;
7972 struct epoll_event *epp = 0;
7973 if (arg4) {
7974 struct target_epoll_event *target_ep;
7975 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
7976 goto efault;
7977 }
7978 ep.events = tswap32(target_ep->events);
7979 /* The epoll_data_t union is just opaque data to the kernel,
7980 * so we transfer all 64 bits across and need not worry what
7981 * actual data type it is.
7982 */
7983 ep.data.u64 = tswap64(target_ep->data.u64);
7984 unlock_user_struct(target_ep, arg4, 0);
7985 epp = &ep;
7986 }
7987 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
7988 break;
7989 }
7990 #endif
7991
7992 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7993 #define IMPLEMENT_EPOLL_PWAIT
7994 #endif
7995 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7996 #if defined(TARGET_NR_epoll_wait)
7997 case TARGET_NR_epoll_wait:
7998 #endif
7999 #if defined(IMPLEMENT_EPOLL_PWAIT)
8000 case TARGET_NR_epoll_pwait:
8001 #endif
8002 {
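/* epoll_wait and epoll_pwait share this implementation: the guest's
 * event array is locked for writing, a host-side epoll_event array is
 * built on the stack, and after the call each result is byte-swapped
 * back into the guest array.  epoll_pwait additionally converts the
 * guest sigset, if one was supplied. */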
8003 struct target_epoll_event *target_ep;
8004 struct epoll_event *ep;
8005 int epfd = arg1;
8006 int maxevents = arg3;
8007 int timeout = arg4;
8008
8009 target_ep = lock_user(VERIFY_WRITE, arg2,
8010 maxevents * sizeof(struct target_epoll_event), 1);
8011 if (!target_ep) {
8012 goto efault;
8013 }
8014
8015 ep = alloca(maxevents * sizeof(struct epoll_event));
8016
8017 switch (num) {
8018 #if defined(IMPLEMENT_EPOLL_PWAIT)
8019 case TARGET_NR_epoll_pwait:
8020 {
8021 target_sigset_t *target_set;
8022 sigset_t _set, *set = &_set;
8023
8024 if (arg5) {
8025 target_set = lock_user(VERIFY_READ, arg5,
8026 sizeof(target_sigset_t), 1);
8027 if (!target_set) {
8028 unlock_user(target_ep, arg2, 0);
8029 goto efault;
8030 }
8031 target_to_host_sigset(set, target_set);
8032 unlock_user(target_set, arg5, 0);
8033 } else {
8034 set = NULL;
8035 }
8036
8037 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8038 break;
8039 }
8040 #endif
8041 #if defined(TARGET_NR_epoll_wait)
8042 case TARGET_NR_epoll_wait:
8043 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8044 break;
8045 #endif
8046 default:
8047 ret = -TARGET_ENOSYS;
8048 }
8049 if (!is_error(ret)) {
8050 int i;
8051 for (i = 0; i < ret; i++) {
8052 target_ep[i].events = tswap32(ep[i].events);
8053 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8054 }
8055 }
8056 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8057 break;
8058 }
8059 #endif
8060 #endif
8061 #ifdef TARGET_NR_prlimit64
8062 case TARGET_NR_prlimit64:
8063 {
8064 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8065 struct target_rlimit64 *target_rnew, *target_rold;
8066 struct host_rlimit64 rnew, rold, *rnewp = 0;
8067 if (arg3) {
8068 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8069 goto efault;
8070 }
8071 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8072 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8073 unlock_user_struct(target_rnew, arg3, 0);
8074 rnewp = &rnew;
8075 }
8076
8077 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8078 if (!is_error(ret) && arg4) {
8079 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8080 goto efault;
8081 }
8082 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8083 target_rold->rlim_max = tswap64(rold.rlim_max);
8084 unlock_user_struct(target_rold, arg4, 1);
8085 }
8086 break;
8087 }
8088 #endif
8089 default:
8090 unimplemented:
8091 gemu_log("qemu: Unsupported syscall: %d\n", num);
8092 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8093 unimplemented_nowarn:
8094 #endif
8095 ret = -TARGET_ENOSYS;
8096 break;
8097 }
8098 fail:
8099 #ifdef DEBUG
8100 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8101 #endif
8102 if (do_strace)
8103 print_syscall_ret(num, ret);
8104 return ret;
8105 efault:
8106 ret = -TARGET_EFAULT;
8107 goto fail;
8108 }