1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81
82 #define termios host_termios
83 #define winsize host_winsize
84 #define termio host_termio
85 #define sgttyb host_sgttyb /* same as target */
86 #define tchars host_tchars /* same as target */
87 #define ltchars host_ltchars /* same as target */
88
89 #include <linux/termios.h>
90 #include <linux/unistd.h>
91 #include <linux/utsname.h>
92 #include <linux/cdrom.h>
93 #include <linux/hdreg.h>
94 #include <linux/soundcard.h>
95 #include <linux/kd.h>
96 #include <linux/mtio.h>
97 #include <linux/fs.h>
98 #if defined(CONFIG_FIEMAP)
99 #include <linux/fiemap.h>
100 #endif
101 #include <linux/fb.h>
102 #include <linux/vt.h>
103 #include <linux/dm-ioctl.h>
104 #include "linux_loop.h"
105 #include "cpu-uname.h"
106
107 #include "qemu.h"
108
109 #if defined(CONFIG_USE_NPTL)
110 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
111 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
112 #else
113 /* XXX: Hardcode the above values. */
114 #define CLONE_NPTL_FLAGS2 0
115 #endif
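/* CLONE_NPTL_FLAGS2 groups the thread-related clone() flags (TLS plus
 * parent/child TID bookkeeping); when QEMU is built without NPTL support it
 * is defined as 0, so none of those flags are recognised. */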
116
117 //#define DEBUG
118
119 //#include <linux/msdos_fs.h>
120 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
121 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
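/* Local copies of the VFAT readdir ioctl request codes: <linux/msdos_fs.h> is
 * left commented out above, so the two values are re-declared here for the
 * ioctl emulation. */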
122
123
124 #undef _syscall0
125 #undef _syscall1
126 #undef _syscall2
127 #undef _syscall3
128 #undef _syscall4
129 #undef _syscall5
130 #undef _syscall6
131
132 #define _syscall0(type,name) \
133 static type name (void) \
134 { \
135 return syscall(__NR_##name); \
136 }
137
138 #define _syscall1(type,name,type1,arg1) \
139 static type name (type1 arg1) \
140 { \
141 return syscall(__NR_##name, arg1); \
142 }
143
144 #define _syscall2(type,name,type1,arg1,type2,arg2) \
145 static type name (type1 arg1,type2 arg2) \
146 { \
147 return syscall(__NR_##name, arg1, arg2); \
148 }
149
150 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
151 static type name (type1 arg1,type2 arg2,type3 arg3) \
152 { \
153 return syscall(__NR_##name, arg1, arg2, arg3); \
154 }
155
156 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
157 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
158 { \
159 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
160 }
161
162 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
165 { \
166 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
167 }
168
169
170 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
171 type5,arg5,type6,arg6) \
172 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
173 type6 arg6) \
174 { \
175 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
176 }
177
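/* For illustration: a wrapper declared as
 *
 *     _syscall2(int, sys_tkill, int, tid, int, sig)
 *
 * expands to
 *
 *     static int sys_tkill (int tid, int sig)
 *     {
 *         return syscall(__NR_sys_tkill, tid, sig);
 *     }
 *
 * i.e. each wrapper simply forwards its arguments to the raw host syscall
 * named by the corresponding __NR_sys_* alias defined below. */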
178
179 #define __NR_sys_uname __NR_uname
180 #define __NR_sys_faccessat __NR_faccessat
181 #define __NR_sys_fchmodat __NR_fchmodat
182 #define __NR_sys_fchownat __NR_fchownat
183 #define __NR_sys_fstatat64 __NR_fstatat64
184 #define __NR_sys_futimesat __NR_futimesat
185 #define __NR_sys_getcwd1 __NR_getcwd
186 #define __NR_sys_getdents __NR_getdents
187 #define __NR_sys_getdents64 __NR_getdents64
188 #define __NR_sys_getpriority __NR_getpriority
189 #define __NR_sys_linkat __NR_linkat
190 #define __NR_sys_mkdirat __NR_mkdirat
191 #define __NR_sys_mknodat __NR_mknodat
192 #define __NR_sys_newfstatat __NR_newfstatat
193 #define __NR_sys_openat __NR_openat
194 #define __NR_sys_readlinkat __NR_readlinkat
195 #define __NR_sys_renameat __NR_renameat
196 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
197 #define __NR_sys_symlinkat __NR_symlinkat
198 #define __NR_sys_syslog __NR_syslog
199 #define __NR_sys_tgkill __NR_tgkill
200 #define __NR_sys_tkill __NR_tkill
201 #define __NR_sys_unlinkat __NR_unlinkat
202 #define __NR_sys_utimensat __NR_utimensat
203 #define __NR_sys_futex __NR_futex
204 #define __NR_sys_inotify_init __NR_inotify_init
205 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
206 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
207
208 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
209 defined(__s390x__)
210 #define __NR__llseek __NR_lseek
211 #endif
212
213 #ifdef __NR_gettid
214 _syscall0(int, gettid)
215 #else
216 /* This is a replacement for the host gettid() and must return a host
217 errno. */
218 static int gettid(void) {
219 return -ENOSYS;
220 }
221 #endif
222 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
223 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
224 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
225 #endif
226 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
227 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
228 loff_t *, res, uint, wh);
229 #endif
230 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
231 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
232 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
233 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
234 #endif
235 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
236 _syscall2(int,sys_tkill,int,tid,int,sig)
237 #endif
238 #ifdef __NR_exit_group
239 _syscall1(int,exit_group,int,error_code)
240 #endif
241 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
242 _syscall1(int,set_tid_address,int *,tidptr)
243 #endif
244 #if defined(CONFIG_USE_NPTL)
245 #if defined(TARGET_NR_futex) && defined(__NR_futex)
246 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
247 const struct timespec *,timeout,int *,uaddr2,int,val3)
248 #endif
249 #endif
250 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
251 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
252 unsigned long *, user_mask_ptr);
253 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
254 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
255 unsigned long *, user_mask_ptr);
256 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
257 void *, arg);
258
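/* Each row below is { target_mask, target_bits, host_mask, host_bits }: the
 * bitmask translation helpers (target_to_host_bitmask() and its inverse,
 * defined elsewhere in this file) match the masked target flags against
 * target_bits and substitute host_bits, and the reverse for the opposite
 * direction. */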
259 static bitmask_transtbl fcntl_flags_tbl[] = {
260 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
261 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
262 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
263 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
264 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
265 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
266 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
267 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
268 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
269 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
270 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
271 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
272 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
275 #endif
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
278 #endif
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
281 #endif
282 #if defined(O_PATH)
283 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
284 #endif
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
288 #endif
289 { 0, 0, 0, 0 }
290 };
291
292 #define COPY_UTSNAME_FIELD(dest, src) \
293 do { \
294 /* __NEW_UTS_LEN doesn't include terminating null */ \
295 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
296 (dest)[__NEW_UTS_LEN] = '\0'; \
297 } while (0)
298
299 static int sys_uname(struct new_utsname *buf)
300 {
301 struct utsname uts_buf;
302
303 if (uname(&uts_buf) < 0)
304 return (-1);
305
306 /*
307 * Just in case these have some differences, we
308 * translate utsname to new_utsname (which is the
309 * struct the Linux kernel uses).
310 */
311
312 memset(buf, 0, sizeof(*buf));
313 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
314 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
315 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
316 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
317 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
318 #ifdef _GNU_SOURCE
319 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
320 #endif
321 return (0);
322
323 #undef COPY_UTSNAME_FIELD
324 }
325
326 static int sys_getcwd1(char *buf, size_t size)
327 {
328 if (getcwd(buf, size) == NULL) {
329 /* getcwd() sets errno */
330 return (-1);
331 }
332 return strlen(buf)+1;
333 }
334
335 #ifdef CONFIG_ATFILE
336 /*
337 * The host system seems to have the atfile syscall stubs available. We
338 * now enable them one by one, as specified by the target's syscall_nr.h.
339 */
340
341 #ifdef TARGET_NR_faccessat
342 static int sys_faccessat(int dirfd, const char *pathname, int mode)
343 {
344 return (faccessat(dirfd, pathname, mode, 0));
345 }
346 #endif
347 #ifdef TARGET_NR_fchmodat
348 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
349 {
350 return (fchmodat(dirfd, pathname, mode, 0));
351 }
352 #endif
353 #if defined(TARGET_NR_fchownat)
354 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
355 gid_t group, int flags)
356 {
357 return (fchownat(dirfd, pathname, owner, group, flags));
358 }
359 #endif
360 #ifdef __NR_fstatat64
361 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
362 int flags)
363 {
364 return (fstatat(dirfd, pathname, buf, flags));
365 }
366 #endif
367 #ifdef __NR_newfstatat
368 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
369 int flags)
370 {
371 return (fstatat(dirfd, pathname, buf, flags));
372 }
373 #endif
374 #ifdef TARGET_NR_futimesat
375 static int sys_futimesat(int dirfd, const char *pathname,
376 const struct timeval times[2])
377 {
378 return (futimesat(dirfd, pathname, times));
379 }
380 #endif
381 #ifdef TARGET_NR_linkat
382 static int sys_linkat(int olddirfd, const char *oldpath,
383 int newdirfd, const char *newpath, int flags)
384 {
385 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
386 }
387 #endif
388 #ifdef TARGET_NR_mkdirat
389 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
390 {
391 return (mkdirat(dirfd, pathname, mode));
392 }
393 #endif
394 #ifdef TARGET_NR_mknodat
395 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
396 dev_t dev)
397 {
398 return (mknodat(dirfd, pathname, mode, dev));
399 }
400 #endif
401 #ifdef TARGET_NR_openat
402 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
403 {
404 /*
405 * open(2) takes an extra 'mode' parameter when called
406 * with the O_CREAT flag.
407 */
408 if ((flags & O_CREAT) != 0) {
409 return (openat(dirfd, pathname, flags, mode));
410 }
411 return (openat(dirfd, pathname, flags));
412 }
413 #endif
414 #ifdef TARGET_NR_readlinkat
415 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
416 {
417 return (readlinkat(dirfd, pathname, buf, bufsiz));
418 }
419 #endif
420 #ifdef TARGET_NR_renameat
421 static int sys_renameat(int olddirfd, const char *oldpath,
422 int newdirfd, const char *newpath)
423 {
424 return (renameat(olddirfd, oldpath, newdirfd, newpath));
425 }
426 #endif
427 #ifdef TARGET_NR_symlinkat
428 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
429 {
430 return (symlinkat(oldpath, newdirfd, newpath));
431 }
432 #endif
433 #ifdef TARGET_NR_unlinkat
434 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
435 {
436 return (unlinkat(dirfd, pathname, flags));
437 }
438 #endif
439 #else /* !CONFIG_ATFILE */
440
441 /*
442 * Try direct syscalls instead
443 */
444 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
445 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
446 #endif
447 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
448 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
449 #endif
450 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
451 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
452 uid_t,owner,gid_t,group,int,flags)
453 #endif
454 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
455 defined(__NR_fstatat64)
456 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
457 struct stat *,buf,int,flags)
458 #endif
459 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
460 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
461 const struct timeval *,times)
462 #endif
463 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
464 defined(__NR_newfstatat)
465 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
466 struct stat *,buf,int,flags)
467 #endif
468 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
469 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
470 int,newdirfd,const char *,newpath,int,flags)
471 #endif
472 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
473 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
474 #endif
475 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
476 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
477 mode_t,mode,dev_t,dev)
478 #endif
479 #if defined(TARGET_NR_openat) && defined(__NR_openat)
480 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
481 #endif
482 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
483 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
484 char *,buf,size_t,bufsize)
485 #endif
486 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
487 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
488 int,newdirfd,const char *,newpath)
489 #endif
490 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
491 _syscall3(int,sys_symlinkat,const char *,oldpath,
492 int,newdirfd,const char *,newpath)
493 #endif
494 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
495 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
496 #endif
497
498 #endif /* CONFIG_ATFILE */
499
500 #ifdef CONFIG_UTIMENSAT
501 static int sys_utimensat(int dirfd, const char *pathname,
502 const struct timespec times[2], int flags)
503 {
504 if (pathname == NULL)
505 return futimens(dirfd, times);
506 else
507 return utimensat(dirfd, pathname, times, flags);
508 }
509 #else
510 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
511 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
512 const struct timespec *,tsp,int,flags)
513 #endif
514 #endif /* CONFIG_UTIMENSAT */
515
516 #ifdef CONFIG_INOTIFY
517 #include <sys/inotify.h>
518
519 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
520 static int sys_inotify_init(void)
521 {
522 return (inotify_init());
523 }
524 #endif
525 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
526 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
527 {
528 return (inotify_add_watch(fd, pathname, mask));
529 }
530 #endif
531 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
532 static int sys_inotify_rm_watch(int fd, int32_t wd)
533 {
534 return (inotify_rm_watch(fd, wd));
535 }
536 #endif
537 #ifdef CONFIG_INOTIFY1
538 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
539 static int sys_inotify_init1(int flags)
540 {
541 return (inotify_init1(flags));
542 }
543 #endif
544 #endif
545 #else
546 /* Userspace can usually survive at runtime without inotify */
547 #undef TARGET_NR_inotify_init
548 #undef TARGET_NR_inotify_init1
549 #undef TARGET_NR_inotify_add_watch
550 #undef TARGET_NR_inotify_rm_watch
551 #endif /* CONFIG_INOTIFY */
552
553 #if defined(TARGET_NR_ppoll)
554 #ifndef __NR_ppoll
555 # define __NR_ppoll -1
556 #endif
557 #define __NR_sys_ppoll __NR_ppoll
558 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
559 struct timespec *, timeout, const __sigset_t *, sigmask,
560 size_t, sigsetsize)
561 #endif
562
563 #if defined(TARGET_NR_pselect6)
564 #ifndef __NR_pselect6
565 # define __NR_pselect6 -1
566 #endif
567 #define __NR_sys_pselect6 __NR_pselect6
568 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
569 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
570 #endif
571
572 #if defined(TARGET_NR_prlimit64)
573 #ifndef __NR_prlimit64
574 # define __NR_prlimit64 -1
575 #endif
576 #define __NR_sys_prlimit64 __NR_prlimit64
577 /* The glibc rlimit structure may not be the one used by the underlying syscall. */
578 struct host_rlimit64 {
579 uint64_t rlim_cur;
580 uint64_t rlim_max;
581 };
582 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
583 const struct host_rlimit64 *, new_limit,
584 struct host_rlimit64 *, old_limit)
585 #endif
586
587 /* ARM EABI and MIPS expect 64-bit types to be aligned on even register pairs */
588 #ifdef TARGET_ARM
589 static inline int regpairs_aligned(void *cpu_env) {
590 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
591 }
592 #elif defined(TARGET_MIPS)
593 static inline int regpairs_aligned(void *cpu_env) { return 1; }
594 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
595 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even pairs
596 * of registers, which translates to the same rule as ARM/MIPS, because we start with
597 * r3 as arg1. */
598 static inline int regpairs_aligned(void *cpu_env) { return 1; }
599 #else
600 static inline int regpairs_aligned(void *cpu_env) { return 0; }
601 #endif
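/* Illustration (an assumption, not shown in this excerpt): on ARM EABI a
 * 64-bit argument such as the length passed to truncate64() arrives in an
 * even/odd register pair (r2/r3) with the preceding slot unused, so callers
 * of regpairs_aligned() are expected to skip one argument slot before
 * reassembling the 64-bit value. */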
602
603 #define ERRNO_TABLE_SIZE 1200
604
605 /* target_to_host_errno_table[] is initialized from
606 * host_to_target_errno_table[] in syscall_init(). */
607 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
608 };
609
610 /*
611 * This list is the union of errno values overridden in asm-<arch>/errno.h
612 * minus the errnos that are not actually generic to all archs.
613 */
614 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
615 [EIDRM] = TARGET_EIDRM,
616 [ECHRNG] = TARGET_ECHRNG,
617 [EL2NSYNC] = TARGET_EL2NSYNC,
618 [EL3HLT] = TARGET_EL3HLT,
619 [EL3RST] = TARGET_EL3RST,
620 [ELNRNG] = TARGET_ELNRNG,
621 [EUNATCH] = TARGET_EUNATCH,
622 [ENOCSI] = TARGET_ENOCSI,
623 [EL2HLT] = TARGET_EL2HLT,
624 [EDEADLK] = TARGET_EDEADLK,
625 [ENOLCK] = TARGET_ENOLCK,
626 [EBADE] = TARGET_EBADE,
627 [EBADR] = TARGET_EBADR,
628 [EXFULL] = TARGET_EXFULL,
629 [ENOANO] = TARGET_ENOANO,
630 [EBADRQC] = TARGET_EBADRQC,
631 [EBADSLT] = TARGET_EBADSLT,
632 [EBFONT] = TARGET_EBFONT,
633 [ENOSTR] = TARGET_ENOSTR,
634 [ENODATA] = TARGET_ENODATA,
635 [ETIME] = TARGET_ETIME,
636 [ENOSR] = TARGET_ENOSR,
637 [ENONET] = TARGET_ENONET,
638 [ENOPKG] = TARGET_ENOPKG,
639 [EREMOTE] = TARGET_EREMOTE,
640 [ENOLINK] = TARGET_ENOLINK,
641 [EADV] = TARGET_EADV,
642 [ESRMNT] = TARGET_ESRMNT,
643 [ECOMM] = TARGET_ECOMM,
644 [EPROTO] = TARGET_EPROTO,
645 [EDOTDOT] = TARGET_EDOTDOT,
646 [EMULTIHOP] = TARGET_EMULTIHOP,
647 [EBADMSG] = TARGET_EBADMSG,
648 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
649 [EOVERFLOW] = TARGET_EOVERFLOW,
650 [ENOTUNIQ] = TARGET_ENOTUNIQ,
651 [EBADFD] = TARGET_EBADFD,
652 [EREMCHG] = TARGET_EREMCHG,
653 [ELIBACC] = TARGET_ELIBACC,
654 [ELIBBAD] = TARGET_ELIBBAD,
655 [ELIBSCN] = TARGET_ELIBSCN,
656 [ELIBMAX] = TARGET_ELIBMAX,
657 [ELIBEXEC] = TARGET_ELIBEXEC,
658 [EILSEQ] = TARGET_EILSEQ,
659 [ENOSYS] = TARGET_ENOSYS,
660 [ELOOP] = TARGET_ELOOP,
661 [ERESTART] = TARGET_ERESTART,
662 [ESTRPIPE] = TARGET_ESTRPIPE,
663 [ENOTEMPTY] = TARGET_ENOTEMPTY,
664 [EUSERS] = TARGET_EUSERS,
665 [ENOTSOCK] = TARGET_ENOTSOCK,
666 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
667 [EMSGSIZE] = TARGET_EMSGSIZE,
668 [EPROTOTYPE] = TARGET_EPROTOTYPE,
669 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
670 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
671 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
672 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
673 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
674 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
675 [EADDRINUSE] = TARGET_EADDRINUSE,
676 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
677 [ENETDOWN] = TARGET_ENETDOWN,
678 [ENETUNREACH] = TARGET_ENETUNREACH,
679 [ENETRESET] = TARGET_ENETRESET,
680 [ECONNABORTED] = TARGET_ECONNABORTED,
681 [ECONNRESET] = TARGET_ECONNRESET,
682 [ENOBUFS] = TARGET_ENOBUFS,
683 [EISCONN] = TARGET_EISCONN,
684 [ENOTCONN] = TARGET_ENOTCONN,
685 [EUCLEAN] = TARGET_EUCLEAN,
686 [ENOTNAM] = TARGET_ENOTNAM,
687 [ENAVAIL] = TARGET_ENAVAIL,
688 [EISNAM] = TARGET_EISNAM,
689 [EREMOTEIO] = TARGET_EREMOTEIO,
690 [ESHUTDOWN] = TARGET_ESHUTDOWN,
691 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
692 [ETIMEDOUT] = TARGET_ETIMEDOUT,
693 [ECONNREFUSED] = TARGET_ECONNREFUSED,
694 [EHOSTDOWN] = TARGET_EHOSTDOWN,
695 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
696 [EALREADY] = TARGET_EALREADY,
697 [EINPROGRESS] = TARGET_EINPROGRESS,
698 [ESTALE] = TARGET_ESTALE,
699 [ECANCELED] = TARGET_ECANCELED,
700 [ENOMEDIUM] = TARGET_ENOMEDIUM,
701 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
702 #ifdef ENOKEY
703 [ENOKEY] = TARGET_ENOKEY,
704 #endif
705 #ifdef EKEYEXPIRED
706 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
707 #endif
708 #ifdef EKEYREVOKED
709 [EKEYREVOKED] = TARGET_EKEYREVOKED,
710 #endif
711 #ifdef EKEYREJECTED
712 [EKEYREJECTED] = TARGET_EKEYREJECTED,
713 #endif
714 #ifdef EOWNERDEAD
715 [EOWNERDEAD] = TARGET_EOWNERDEAD,
716 #endif
717 #ifdef ENOTRECOVERABLE
718 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
719 #endif
720 };
721
722 static inline int host_to_target_errno(int err)
723 {
724 if(host_to_target_errno_table[err])
725 return host_to_target_errno_table[err];
726 return err;
727 }
728
729 static inline int target_to_host_errno(int err)
730 {
731 if (target_to_host_errno_table[err])
732 return target_to_host_errno_table[err];
733 return err;
734 }
735
736 static inline abi_long get_errno(abi_long ret)
737 {
738 if (ret == -1)
739 return -host_to_target_errno(errno);
740 else
741 return ret;
742 }
743
744 static inline int is_error(abi_long ret)
745 {
746 return (abi_ulong)ret >= (abi_ulong)(-4096);
747 }
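/* Usage sketch (illustration only): host calls are normally wrapped as
 *
 *     ret = get_errno(bind(sockfd, addr, addrlen));
 *
 * so a failing call yields a negative target errno, while is_error() mirrors
 * the kernel convention of treating values in the range -4096..-1 as error
 * codes rather than valid results. */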
748
749 char *target_strerror(int err)
750 {
751 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
752 return NULL;
753 }
754 return strerror(target_to_host_errno(err));
755 }
756
757 static abi_ulong target_brk;
758 static abi_ulong target_original_brk;
759 static abi_ulong brk_page;
760
761 void target_set_brk(abi_ulong new_brk)
762 {
763 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
764 brk_page = HOST_PAGE_ALIGN(target_brk);
765 }
766
767 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
768 #define DEBUGF_BRK(message, args...)
769
770 /* do_brk() must return target values and target errnos. */
771 abi_long do_brk(abi_ulong new_brk)
772 {
773 abi_long mapped_addr;
774 int new_alloc_size;
775
776 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
777
778 if (!new_brk) {
779 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
780 return target_brk;
781 }
782 if (new_brk < target_original_brk) {
783 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
784 target_brk);
785 return target_brk;
786 }
787
788 /* If the new brk is less than the highest page reserved to the
789 * target heap allocation, set it and we're almost done... */
790 if (new_brk <= brk_page) {
791 /* Heap contents are initialized to zero, as for anonymous
792 * mapped pages. */
793 if (new_brk > target_brk) {
794 memset(g2h(target_brk), 0, new_brk - target_brk);
795 }
796 target_brk = new_brk;
797 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
798 return target_brk;
799 }
800
801 /* We need to allocate more memory after the brk... Note that
802 * we don't use MAP_FIXED because that will map over the top of
803 * any existing mapping (like the one with the host libc or qemu
804 * itself); instead we treat "mapped but at wrong address" as
805 * a failure and unmap again.
806 */
807 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
808 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
809 PROT_READ|PROT_WRITE,
810 MAP_ANON|MAP_PRIVATE, 0, 0));
811
812 if (mapped_addr == brk_page) {
813 /* Heap contents are initialized to zero, as for anonymous
814 * mapped pages. Technically the new pages are already
815 * initialized to zero since they *are* anonymous mapped
816 * pages, however we have to take care with the contents that
817 * come from the remaining part of the previous page: it may
818 * contain garbage data due to previous heap usage (grown
819 * then shrunk). */
820 memset(g2h(target_brk), 0, brk_page - target_brk);
821
822 target_brk = new_brk;
823 brk_page = HOST_PAGE_ALIGN(target_brk);
824 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
825 target_brk);
826 return target_brk;
827 } else if (mapped_addr != -1) {
828 /* Mapped but at wrong address, meaning there wasn't actually
829 * enough space for this brk.
830 */
831 target_munmap(mapped_addr, new_alloc_size);
832 mapped_addr = -1;
833 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
834 }
835 else {
836 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
837 }
838
839 #if defined(TARGET_ALPHA)
840 /* We (partially) emulate OSF/1 on Alpha, which requires we
841 return a proper errno, not an unchanged brk value. */
842 return -TARGET_ENOMEM;
843 #endif
844 /* For everything else, return the previous break. */
845 return target_brk;
846 }
847
848 static inline abi_long copy_from_user_fdset(fd_set *fds,
849 abi_ulong target_fds_addr,
850 int n)
851 {
852 int i, nw, j, k;
853 abi_ulong b, *target_fds;
854
855 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
856 if (!(target_fds = lock_user(VERIFY_READ,
857 target_fds_addr,
858 sizeof(abi_ulong) * nw,
859 1)))
860 return -TARGET_EFAULT;
861
862 FD_ZERO(fds);
863 k = 0;
864 for (i = 0; i < nw; i++) {
865 /* grab the abi_ulong */
866 __get_user(b, &target_fds[i]);
867 for (j = 0; j < TARGET_ABI_BITS; j++) {
868 /* check the bit inside the abi_ulong */
869 if ((b >> j) & 1)
870 FD_SET(k, fds);
871 k++;
872 }
873 }
874
875 unlock_user(target_fds, target_fds_addr, 0);
876
877 return 0;
878 }
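/* Note: the fd_set cannot simply be memcpy'd, because the guest stores it as
 * an array of abi_ulong words in target byte order while the host fd_set may
 * use a different word size and endianness; hence the bit-by-bit repacking
 * here and in copy_to_user_fdset() below. */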
879
880 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
881 abi_ulong target_fds_addr,
882 int n)
883 {
884 if (target_fds_addr) {
885 if (copy_from_user_fdset(fds, target_fds_addr, n))
886 return -TARGET_EFAULT;
887 *fds_ptr = fds;
888 } else {
889 *fds_ptr = NULL;
890 }
891 return 0;
892 }
893
894 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
895 const fd_set *fds,
896 int n)
897 {
898 int i, nw, j, k;
899 abi_long v;
900 abi_ulong *target_fds;
901
902 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
903 if (!(target_fds = lock_user(VERIFY_WRITE,
904 target_fds_addr,
905 sizeof(abi_ulong) * nw,
906 0)))
907 return -TARGET_EFAULT;
908
909 k = 0;
910 for (i = 0; i < nw; i++) {
911 v = 0;
912 for (j = 0; j < TARGET_ABI_BITS; j++) {
913 v |= ((FD_ISSET(k, fds) != 0) << j);
914 k++;
915 }
916 __put_user(v, &target_fds[i]);
917 }
918
919 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
920
921 return 0;
922 }
923
924 #if defined(__alpha__)
925 #define HOST_HZ 1024
926 #else
927 #define HOST_HZ 100
928 #endif
929
930 static inline abi_long host_to_target_clock_t(long ticks)
931 {
932 #if HOST_HZ == TARGET_HZ
933 return ticks;
934 #else
935 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
936 #endif
937 }
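/* Worked example (illustration only): with HOST_HZ == 100 and TARGET_HZ ==
 * 1024, a host value of 250 clock ticks is reported to the guest as
 * 250 * 1024 / 100 = 2560 target ticks. */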
938
939 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
940 const struct rusage *rusage)
941 {
942 struct target_rusage *target_rusage;
943
944 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
945 return -TARGET_EFAULT;
946 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
947 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
948 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
949 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
950 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
951 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
952 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
953 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
954 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
955 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
956 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
957 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
958 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
959 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
960 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
961 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
962 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
963 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
964 unlock_user_struct(target_rusage, target_addr, 1);
965
966 return 0;
967 }
968
969 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
970 {
971 abi_ulong target_rlim_swap;
972 rlim_t result;
973
974 target_rlim_swap = tswapal(target_rlim);
975 if (target_rlim_swap == TARGET_RLIM_INFINITY)
976 return RLIM_INFINITY;
977
978 result = target_rlim_swap;
979 if (target_rlim_swap != (rlim_t)result)
980 return RLIM_INFINITY;
981
982 return result;
983 }
984
985 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
986 {
987 abi_ulong target_rlim_swap;
988 abi_ulong result;
989
990 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
991 target_rlim_swap = TARGET_RLIM_INFINITY;
992 else
993 target_rlim_swap = rlim;
994 result = tswapal(target_rlim_swap);
995
996 return result;
997 }
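/* In both directions a limit that does not fit the destination type is
 * reported as "infinite": a host rlim_t value outside the abi_ulong range
 * becomes TARGET_RLIM_INFINITY, and vice versa. */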
998
999 static inline int target_to_host_resource(int code)
1000 {
1001 switch (code) {
1002 case TARGET_RLIMIT_AS:
1003 return RLIMIT_AS;
1004 case TARGET_RLIMIT_CORE:
1005 return RLIMIT_CORE;
1006 case TARGET_RLIMIT_CPU:
1007 return RLIMIT_CPU;
1008 case TARGET_RLIMIT_DATA:
1009 return RLIMIT_DATA;
1010 case TARGET_RLIMIT_FSIZE:
1011 return RLIMIT_FSIZE;
1012 case TARGET_RLIMIT_LOCKS:
1013 return RLIMIT_LOCKS;
1014 case TARGET_RLIMIT_MEMLOCK:
1015 return RLIMIT_MEMLOCK;
1016 case TARGET_RLIMIT_MSGQUEUE:
1017 return RLIMIT_MSGQUEUE;
1018 case TARGET_RLIMIT_NICE:
1019 return RLIMIT_NICE;
1020 case TARGET_RLIMIT_NOFILE:
1021 return RLIMIT_NOFILE;
1022 case TARGET_RLIMIT_NPROC:
1023 return RLIMIT_NPROC;
1024 case TARGET_RLIMIT_RSS:
1025 return RLIMIT_RSS;
1026 case TARGET_RLIMIT_RTPRIO:
1027 return RLIMIT_RTPRIO;
1028 case TARGET_RLIMIT_SIGPENDING:
1029 return RLIMIT_SIGPENDING;
1030 case TARGET_RLIMIT_STACK:
1031 return RLIMIT_STACK;
1032 default:
1033 return code;
1034 }
1035 }
1036
1037 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1038 abi_ulong target_tv_addr)
1039 {
1040 struct target_timeval *target_tv;
1041
1042 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1043 return -TARGET_EFAULT;
1044
1045 __get_user(tv->tv_sec, &target_tv->tv_sec);
1046 __get_user(tv->tv_usec, &target_tv->tv_usec);
1047
1048 unlock_user_struct(target_tv, target_tv_addr, 0);
1049
1050 return 0;
1051 }
1052
1053 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1054 const struct timeval *tv)
1055 {
1056 struct target_timeval *target_tv;
1057
1058 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1059 return -TARGET_EFAULT;
1060
1061 __put_user(tv->tv_sec, &target_tv->tv_sec);
1062 __put_user(tv->tv_usec, &target_tv->tv_usec);
1063
1064 unlock_user_struct(target_tv, target_tv_addr, 1);
1065
1066 return 0;
1067 }
1068
1069 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1070 #include <mqueue.h>
1071
1072 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1073 abi_ulong target_mq_attr_addr)
1074 {
1075 struct target_mq_attr *target_mq_attr;
1076
1077 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1078 target_mq_attr_addr, 1))
1079 return -TARGET_EFAULT;
1080
1081 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1082 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1083 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1084 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1085
1086 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1087
1088 return 0;
1089 }
1090
1091 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1092 const struct mq_attr *attr)
1093 {
1094 struct target_mq_attr *target_mq_attr;
1095
1096 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1097 target_mq_attr_addr, 0))
1098 return -TARGET_EFAULT;
1099
1100 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1101 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1102 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1103 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1104
1105 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1106
1107 return 0;
1108 }
1109 #endif
1110
1111 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1112 /* do_select() must return target values and target errnos. */
1113 static abi_long do_select(int n,
1114 abi_ulong rfd_addr, abi_ulong wfd_addr,
1115 abi_ulong efd_addr, abi_ulong target_tv_addr)
1116 {
1117 fd_set rfds, wfds, efds;
1118 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1119 struct timeval tv, *tv_ptr;
1120 abi_long ret;
1121
1122 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1123 if (ret) {
1124 return ret;
1125 }
1126 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1127 if (ret) {
1128 return ret;
1129 }
1130 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1131 if (ret) {
1132 return ret;
1133 }
1134
1135 if (target_tv_addr) {
1136 if (copy_from_user_timeval(&tv, target_tv_addr))
1137 return -TARGET_EFAULT;
1138 tv_ptr = &tv;
1139 } else {
1140 tv_ptr = NULL;
1141 }
1142
1143 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1144
1145 if (!is_error(ret)) {
1146 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1147 return -TARGET_EFAULT;
1148 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1149 return -TARGET_EFAULT;
1150 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1151 return -TARGET_EFAULT;
1152
1153 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1154 return -TARGET_EFAULT;
1155 }
1156
1157 return ret;
1158 }
1159 #endif
1160
1161 static abi_long do_pipe2(int host_pipe[], int flags)
1162 {
1163 #ifdef CONFIG_PIPE2
1164 return pipe2(host_pipe, flags);
1165 #else
1166 return -ENOSYS;
1167 #endif
1168 }
1169
1170 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1171 int flags, int is_pipe2)
1172 {
1173 int host_pipe[2];
1174 abi_long ret;
1175 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1176
1177 if (is_error(ret))
1178 return get_errno(ret);
1179
1180 /* Several targets have special calling conventions for the original
1181 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1182 if (!is_pipe2) {
1183 #if defined(TARGET_ALPHA)
1184 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1185 return host_pipe[0];
1186 #elif defined(TARGET_MIPS)
1187 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1188 return host_pipe[0];
1189 #elif defined(TARGET_SH4)
1190 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1191 return host_pipe[0];
1192 #endif
1193 }
1194
1195 if (put_user_s32(host_pipe[0], pipedes)
1196 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1197 return -TARGET_EFAULT;
1198 return get_errno(ret);
1199 }
1200
1201 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1202 abi_ulong target_addr,
1203 socklen_t len)
1204 {
1205 struct target_ip_mreqn *target_smreqn;
1206
1207 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1208 if (!target_smreqn)
1209 return -TARGET_EFAULT;
1210 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1211 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1212 if (len == sizeof(struct target_ip_mreqn))
1213 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1214 unlock_user(target_smreqn, target_addr, 0);
1215
1216 return 0;
1217 }
1218
1219 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1220 abi_ulong target_addr,
1221 socklen_t len)
1222 {
1223 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1224 sa_family_t sa_family;
1225 struct target_sockaddr *target_saddr;
1226
1227 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1228 if (!target_saddr)
1229 return -TARGET_EFAULT;
1230
1231 sa_family = tswap16(target_saddr->sa_family);
1232
1233 /* Oops. The caller might send an incomplete sun_path; sun_path
1234 * must be terminated by \0 (see the manual page), but
1235 * unfortunately it is quite common to specify the sockaddr_un
1236 * length as "strlen(x->sun_path)" while it should be
1237 * "strlen(...) + 1". We'll fix that here if needed.
1238 * The Linux kernel has a similar workaround.
1239 */
1240
1241 if (sa_family == AF_UNIX) {
1242 if (len < unix_maxlen && len > 0) {
1243 char *cp = (char*)target_saddr;
1244
1245 if ( cp[len-1] && !cp[len] )
1246 len++;
1247 }
1248 if (len > unix_maxlen)
1249 len = unix_maxlen;
1250 }
1251
1252 memcpy(addr, target_saddr, len);
1253 addr->sa_family = sa_family;
1254 unlock_user(target_saddr, target_addr, 0);
1255
1256 return 0;
1257 }
1258
1259 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1260 struct sockaddr *addr,
1261 socklen_t len)
1262 {
1263 struct target_sockaddr *target_saddr;
1264
1265 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1266 if (!target_saddr)
1267 return -TARGET_EFAULT;
1268 memcpy(target_saddr, addr, len);
1269 target_saddr->sa_family = tswap16(addr->sa_family);
1270 unlock_user(target_saddr, target_addr, len);
1271
1272 return 0;
1273 }
1274
1275 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1276 struct target_msghdr *target_msgh)
1277 {
1278 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1279 abi_long msg_controllen;
1280 abi_ulong target_cmsg_addr;
1281 struct target_cmsghdr *target_cmsg;
1282 socklen_t space = 0;
1283
1284 msg_controllen = tswapal(target_msgh->msg_controllen);
1285 if (msg_controllen < sizeof (struct target_cmsghdr))
1286 goto the_end;
1287 target_cmsg_addr = tswapal(target_msgh->msg_control);
1288 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1289 if (!target_cmsg)
1290 return -TARGET_EFAULT;
1291
1292 while (cmsg && target_cmsg) {
1293 void *data = CMSG_DATA(cmsg);
1294 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1295
1296 int len = tswapal(target_cmsg->cmsg_len)
1297 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1298
1299 space += CMSG_SPACE(len);
1300 if (space > msgh->msg_controllen) {
1301 space -= CMSG_SPACE(len);
1302 gemu_log("Host cmsg overflow\n");
1303 break;
1304 }
1305
1306 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1307 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1308 cmsg->cmsg_len = CMSG_LEN(len);
1309
1310 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1311 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1312 memcpy(data, target_data, len);
1313 } else {
1314 int *fd = (int *)data;
1315 int *target_fd = (int *)target_data;
1316 int i, numfds = len / sizeof(int);
1317
1318 for (i = 0; i < numfds; i++)
1319 fd[i] = tswap32(target_fd[i]);
1320 }
1321
1322 cmsg = CMSG_NXTHDR(msgh, cmsg);
1323 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1324 }
1325 unlock_user(target_cmsg, target_cmsg_addr, 0);
1326 the_end:
1327 msgh->msg_controllen = space;
1328 return 0;
1329 }
1330
1331 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1332 struct msghdr *msgh)
1333 {
1334 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1335 abi_long msg_controllen;
1336 abi_ulong target_cmsg_addr;
1337 struct target_cmsghdr *target_cmsg;
1338 socklen_t space = 0;
1339
1340 msg_controllen = tswapal(target_msgh->msg_controllen);
1341 if (msg_controllen < sizeof (struct target_cmsghdr))
1342 goto the_end;
1343 target_cmsg_addr = tswapal(target_msgh->msg_control);
1344 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1345 if (!target_cmsg)
1346 return -TARGET_EFAULT;
1347
1348 while (cmsg && target_cmsg) {
1349 void *data = CMSG_DATA(cmsg);
1350 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1351
1352 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1353
1354 space += TARGET_CMSG_SPACE(len);
1355 if (space > msg_controllen) {
1356 space -= TARGET_CMSG_SPACE(len);
1357 gemu_log("Target cmsg overflow\n");
1358 break;
1359 }
1360
1361 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1362 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1363 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1364
1365 if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1366 (cmsg->cmsg_type == SCM_RIGHTS)) {
1367 int *fd = (int *)data;
1368 int *target_fd = (int *)target_data;
1369 int i, numfds = len / sizeof(int);
1370
1371 for (i = 0; i < numfds; i++)
1372 target_fd[i] = tswap32(fd[i]);
1373 } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1374 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1375 (len == sizeof(struct timeval))) {
1376 /* copy struct timeval to target */
1377 struct timeval *tv = (struct timeval *)data;
1378 struct target_timeval *target_tv =
1379 (struct target_timeval *)target_data;
1380
1381 target_tv->tv_sec = tswapal(tv->tv_sec);
1382 target_tv->tv_usec = tswapal(tv->tv_usec);
1383 } else {
1384 gemu_log("Unsupported ancillary data: %d/%d\n",
1385 cmsg->cmsg_level, cmsg->cmsg_type);
1386 memcpy(target_data, data, len);
1387 }
1388
1389 cmsg = CMSG_NXTHDR(msgh, cmsg);
1390 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1391 }
1392 unlock_user(target_cmsg, target_cmsg_addr, space);
1393 the_end:
1394 target_msgh->msg_controllen = tswapal(space);
1395 return 0;
1396 }
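/* Both cmsg conversion routines above understand only SCM_RIGHTS (an array of
 * file descriptors) plus, on the receive path, SO_TIMESTAMP; any other
 * ancillary payload is copied byte-for-byte with a warning, which is only
 * correct when the host and target layouts happen to match. */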
1397
1398 /* do_setsockopt() must return target values and target errnos. */
1399 static abi_long do_setsockopt(int sockfd, int level, int optname,
1400 abi_ulong optval_addr, socklen_t optlen)
1401 {
1402 abi_long ret;
1403 int val;
1404 struct ip_mreqn *ip_mreq;
1405 struct ip_mreq_source *ip_mreq_source;
1406
1407 switch(level) {
1408 case SOL_TCP:
1409 /* TCP options all take an 'int' value. */
1410 if (optlen < sizeof(uint32_t))
1411 return -TARGET_EINVAL;
1412
1413 if (get_user_u32(val, optval_addr))
1414 return -TARGET_EFAULT;
1415 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1416 break;
1417 case SOL_IP:
1418 switch(optname) {
1419 case IP_TOS:
1420 case IP_TTL:
1421 case IP_HDRINCL:
1422 case IP_ROUTER_ALERT:
1423 case IP_RECVOPTS:
1424 case IP_RETOPTS:
1425 case IP_PKTINFO:
1426 case IP_MTU_DISCOVER:
1427 case IP_RECVERR:
1428 case IP_RECVTOS:
1429 #ifdef IP_FREEBIND
1430 case IP_FREEBIND:
1431 #endif
1432 case IP_MULTICAST_TTL:
1433 case IP_MULTICAST_LOOP:
1434 val = 0;
1435 if (optlen >= sizeof(uint32_t)) {
1436 if (get_user_u32(val, optval_addr))
1437 return -TARGET_EFAULT;
1438 } else if (optlen >= 1) {
1439 if (get_user_u8(val, optval_addr))
1440 return -TARGET_EFAULT;
1441 }
1442 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1443 break;
1444 case IP_ADD_MEMBERSHIP:
1445 case IP_DROP_MEMBERSHIP:
1446 if (optlen < sizeof (struct target_ip_mreq) ||
1447 optlen > sizeof (struct target_ip_mreqn))
1448 return -TARGET_EINVAL;
1449
1450 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1451 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1452 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1453 break;
1454
1455 case IP_BLOCK_SOURCE:
1456 case IP_UNBLOCK_SOURCE:
1457 case IP_ADD_SOURCE_MEMBERSHIP:
1458 case IP_DROP_SOURCE_MEMBERSHIP:
1459 if (optlen != sizeof (struct target_ip_mreq_source))
1460 return -TARGET_EINVAL;
1461
1462 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1463 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1464 unlock_user (ip_mreq_source, optval_addr, 0);
1465 break;
1466
1467 default:
1468 goto unimplemented;
1469 }
1470 break;
1471 case SOL_RAW:
1472 switch (optname) {
1473 case ICMP_FILTER:
1474 /* struct icmp_filter takes a u32 value */
1475 if (optlen < sizeof(uint32_t)) {
1476 return -TARGET_EINVAL;
1477 }
1478
1479 if (get_user_u32(val, optval_addr)) {
1480 return -TARGET_EFAULT;
1481 }
1482 ret = get_errno(setsockopt(sockfd, level, optname,
1483 &val, sizeof(val)));
1484 break;
1485
1486 default:
1487 goto unimplemented;
1488 }
1489 break;
1490 case TARGET_SOL_SOCKET:
1491 switch (optname) {
1492 /* Options with 'int' argument. */
1493 case TARGET_SO_DEBUG:
1494 optname = SO_DEBUG;
1495 break;
1496 case TARGET_SO_REUSEADDR:
1497 optname = SO_REUSEADDR;
1498 break;
1499 case TARGET_SO_TYPE:
1500 optname = SO_TYPE;
1501 break;
1502 case TARGET_SO_ERROR:
1503 optname = SO_ERROR;
1504 break;
1505 case TARGET_SO_DONTROUTE:
1506 optname = SO_DONTROUTE;
1507 break;
1508 case TARGET_SO_BROADCAST:
1509 optname = SO_BROADCAST;
1510 break;
1511 case TARGET_SO_SNDBUF:
1512 optname = SO_SNDBUF;
1513 break;
1514 case TARGET_SO_RCVBUF:
1515 optname = SO_RCVBUF;
1516 break;
1517 case TARGET_SO_KEEPALIVE:
1518 optname = SO_KEEPALIVE;
1519 break;
1520 case TARGET_SO_OOBINLINE:
1521 optname = SO_OOBINLINE;
1522 break;
1523 case TARGET_SO_NO_CHECK:
1524 optname = SO_NO_CHECK;
1525 break;
1526 case TARGET_SO_PRIORITY:
1527 optname = SO_PRIORITY;
1528 break;
1529 #ifdef SO_BSDCOMPAT
1530 case TARGET_SO_BSDCOMPAT:
1531 optname = SO_BSDCOMPAT;
1532 break;
1533 #endif
1534 case TARGET_SO_PASSCRED:
1535 optname = SO_PASSCRED;
1536 break;
1537 case TARGET_SO_TIMESTAMP:
1538 optname = SO_TIMESTAMP;
1539 break;
1540 case TARGET_SO_RCVLOWAT:
1541 optname = SO_RCVLOWAT;
1542 break;
1543 case TARGET_SO_RCVTIMEO:
1544 optname = SO_RCVTIMEO;
1545 break;
1546 case TARGET_SO_SNDTIMEO:
1547 optname = SO_SNDTIMEO;
1548 break;
1550 default:
1551 goto unimplemented;
1552 }
1553 if (optlen < sizeof(uint32_t))
1554 return -TARGET_EINVAL;
1555
1556 if (get_user_u32(val, optval_addr))
1557 return -TARGET_EFAULT;
1558 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1559 break;
1560 default:
1561 unimplemented:
1562 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1563 ret = -TARGET_ENOPROTOOPT;
1564 }
1565 return ret;
1566 }
1567
1568 /* do_getsockopt() must return target values and target errnos. */
1569 static abi_long do_getsockopt(int sockfd, int level, int optname,
1570 abi_ulong optval_addr, abi_ulong optlen)
1571 {
1572 abi_long ret;
1573 int len, val;
1574 socklen_t lv;
1575
1576 switch(level) {
1577 case TARGET_SOL_SOCKET:
1578 level = SOL_SOCKET;
1579 switch (optname) {
1580 /* These don't just return a single integer */
1581 case TARGET_SO_LINGER:
1582 case TARGET_SO_RCVTIMEO:
1583 case TARGET_SO_SNDTIMEO:
1584 case TARGET_SO_PEERNAME:
1585 goto unimplemented;
1586 case TARGET_SO_PEERCRED: {
1587 struct ucred cr;
1588 socklen_t crlen;
1589 struct target_ucred *tcr;
1590
1591 if (get_user_u32(len, optlen)) {
1592 return -TARGET_EFAULT;
1593 }
1594 if (len < 0) {
1595 return -TARGET_EINVAL;
1596 }
1597
1598 crlen = sizeof(cr);
1599 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1600 &cr, &crlen));
1601 if (ret < 0) {
1602 return ret;
1603 }
1604 if (len > crlen) {
1605 len = crlen;
1606 }
1607 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1608 return -TARGET_EFAULT;
1609 }
1610 __put_user(cr.pid, &tcr->pid);
1611 __put_user(cr.uid, &tcr->uid);
1612 __put_user(cr.gid, &tcr->gid);
1613 unlock_user_struct(tcr, optval_addr, 1);
1614 if (put_user_u32(len, optlen)) {
1615 return -TARGET_EFAULT;
1616 }
1617 break;
1618 }
1619 /* Options with 'int' argument. */
1620 case TARGET_SO_DEBUG:
1621 optname = SO_DEBUG;
1622 goto int_case;
1623 case TARGET_SO_REUSEADDR:
1624 optname = SO_REUSEADDR;
1625 goto int_case;
1626 case TARGET_SO_TYPE:
1627 optname = SO_TYPE;
1628 goto int_case;
1629 case TARGET_SO_ERROR:
1630 optname = SO_ERROR;
1631 goto int_case;
1632 case TARGET_SO_DONTROUTE:
1633 optname = SO_DONTROUTE;
1634 goto int_case;
1635 case TARGET_SO_BROADCAST:
1636 optname = SO_BROADCAST;
1637 goto int_case;
1638 case TARGET_SO_SNDBUF:
1639 optname = SO_SNDBUF;
1640 goto int_case;
1641 case TARGET_SO_RCVBUF:
1642 optname = SO_RCVBUF;
1643 goto int_case;
1644 case TARGET_SO_KEEPALIVE:
1645 optname = SO_KEEPALIVE;
1646 goto int_case;
1647 case TARGET_SO_OOBINLINE:
1648 optname = SO_OOBINLINE;
1649 goto int_case;
1650 case TARGET_SO_NO_CHECK:
1651 optname = SO_NO_CHECK;
1652 goto int_case;
1653 case TARGET_SO_PRIORITY:
1654 optname = SO_PRIORITY;
1655 goto int_case;
1656 #ifdef SO_BSDCOMPAT
1657 case TARGET_SO_BSDCOMPAT:
1658 optname = SO_BSDCOMPAT;
1659 goto int_case;
1660 #endif
1661 case TARGET_SO_PASSCRED:
1662 optname = SO_PASSCRED;
1663 goto int_case;
1664 case TARGET_SO_TIMESTAMP:
1665 optname = SO_TIMESTAMP;
1666 goto int_case;
1667 case TARGET_SO_RCVLOWAT:
1668 optname = SO_RCVLOWAT;
1669 goto int_case;
1670 default:
1671 goto int_case;
1672 }
1673 break;
1674 case SOL_TCP:
1675 /* TCP options all take an 'int' value. */
1676 int_case:
1677 if (get_user_u32(len, optlen))
1678 return -TARGET_EFAULT;
1679 if (len < 0)
1680 return -TARGET_EINVAL;
1681 lv = sizeof(lv);
1682 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1683 if (ret < 0)
1684 return ret;
1685 if (len > lv)
1686 len = lv;
1687 if (len == 4) {
1688 if (put_user_u32(val, optval_addr))
1689 return -TARGET_EFAULT;
1690 } else {
1691 if (put_user_u8(val, optval_addr))
1692 return -TARGET_EFAULT;
1693 }
1694 if (put_user_u32(len, optlen))
1695 return -TARGET_EFAULT;
1696 break;
1697 case SOL_IP:
1698 switch(optname) {
1699 case IP_TOS:
1700 case IP_TTL:
1701 case IP_HDRINCL:
1702 case IP_ROUTER_ALERT:
1703 case IP_RECVOPTS:
1704 case IP_RETOPTS:
1705 case IP_PKTINFO:
1706 case IP_MTU_DISCOVER:
1707 case IP_RECVERR:
1708 case IP_RECVTOS:
1709 #ifdef IP_FREEBIND
1710 case IP_FREEBIND:
1711 #endif
1712 case IP_MULTICAST_TTL:
1713 case IP_MULTICAST_LOOP:
1714 if (get_user_u32(len, optlen))
1715 return -TARGET_EFAULT;
1716 if (len < 0)
1717 return -TARGET_EINVAL;
1718 lv = sizeof(lv);
1719 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1720 if (ret < 0)
1721 return ret;
1722 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1723 len = 1;
1724 if (put_user_u32(len, optlen)
1725 || put_user_u8(val, optval_addr))
1726 return -TARGET_EFAULT;
1727 } else {
1728 if (len > sizeof(int))
1729 len = sizeof(int);
1730 if (put_user_u32(len, optlen)
1731 || put_user_u32(val, optval_addr))
1732 return -TARGET_EFAULT;
1733 }
1734 break;
1735 default:
1736 ret = -TARGET_ENOPROTOOPT;
1737 break;
1738 }
1739 break;
1740 default:
1741 unimplemented:
1742 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1743 level, optname);
1744 ret = -TARGET_EOPNOTSUPP;
1745 break;
1746 }
1747 return ret;
1748 }
1749
1750 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1751 int count, int copy)
1752 {
1753 struct target_iovec *target_vec;
1754 struct iovec *vec;
1755 abi_ulong total_len, max_len;
1756 int i;
1757
1758 if (count == 0) {
1759 errno = 0;
1760 return NULL;
1761 }
1762 if (count > IOV_MAX) {
1763 errno = EINVAL;
1764 return NULL;
1765 }
1766
1767 vec = calloc(count, sizeof(struct iovec));
1768 if (vec == NULL) {
1769 errno = ENOMEM;
1770 return NULL;
1771 }
1772
1773 target_vec = lock_user(VERIFY_READ, target_addr,
1774 count * sizeof(struct target_iovec), 1);
1775 if (target_vec == NULL) {
1776 errno = EFAULT;
1777 goto fail2;
1778 }
1779
1780 /* ??? If host page size > target page size, this will result in a
1781 value larger than what we can actually support. */
1782 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1783 total_len = 0;
1784
1785 for (i = 0; i < count; i++) {
1786 abi_ulong base = tswapal(target_vec[i].iov_base);
1787 abi_long len = tswapal(target_vec[i].iov_len);
1788
1789 if (len < 0) {
1790 errno = EINVAL;
1791 goto fail;
1792 } else if (len == 0) {
1793 /* Zero length pointer is ignored. */
1794 vec[i].iov_base = 0;
1795 } else {
1796 vec[i].iov_base = lock_user(type, base, len, copy);
1797 if (!vec[i].iov_base) {
1798 errno = EFAULT;
1799 goto fail;
1800 }
1801 if (len > max_len - total_len) {
1802 len = max_len - total_len;
1803 }
1804 }
1805 vec[i].iov_len = len;
1806 total_len += len;
1807 }
1808
1809 unlock_user(target_vec, target_addr, 0);
1810 return vec;
1811
1812 fail:
1813 free(vec);
1814 fail2:
1815 unlock_user(target_vec, target_addr, 0);
1816 return NULL;
1817 }
1818
1819 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1820 int count, int copy)
1821 {
1822 struct target_iovec *target_vec;
1823 int i;
1824
1825 target_vec = lock_user(VERIFY_READ, target_addr,
1826 count * sizeof(struct target_iovec), 1);
1827 if (target_vec) {
1828 for (i = 0; i < count; i++) {
1829 abi_ulong base = tswapal(target_vec[i].iov_base);
1830 abi_long len = tswapal(target_vec[i].iov_len);
1831 if (len < 0) {
1832 break;
1833 }
1834 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1835 }
1836 unlock_user(target_vec, target_addr, 0);
1837 }
1838
1839 free(vec);
1840 }
1841
1842 /* do_socket() must return target values and target errnos. */
1843 static abi_long do_socket(int domain, int type, int protocol)
1844 {
1845 #if defined(TARGET_MIPS)
1846 switch(type) {
1847 case TARGET_SOCK_DGRAM:
1848 type = SOCK_DGRAM;
1849 break;
1850 case TARGET_SOCK_STREAM:
1851 type = SOCK_STREAM;
1852 break;
1853 case TARGET_SOCK_RAW:
1854 type = SOCK_RAW;
1855 break;
1856 case TARGET_SOCK_RDM:
1857 type = SOCK_RDM;
1858 break;
1859 case TARGET_SOCK_SEQPACKET:
1860 type = SOCK_SEQPACKET;
1861 break;
1862 case TARGET_SOCK_PACKET:
1863 type = SOCK_PACKET;
1864 break;
1865 }
1866 #endif
1867 if (domain == PF_NETLINK)
1868 return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1869 return get_errno(socket(domain, type, protocol));
1870 }
1871
1872 /* do_bind() must return target values and target errnos. */
1873 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1874 socklen_t addrlen)
1875 {
1876 void *addr;
1877 abi_long ret;
1878
1879 if ((int)addrlen < 0) {
1880 return -TARGET_EINVAL;
1881 }
1882
1883 addr = alloca(addrlen+1);
1884
1885 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1886 if (ret)
1887 return ret;
1888
1889 return get_errno(bind(sockfd, addr, addrlen));
1890 }
1891
1892 /* do_connect() Must return target values and target errnos. */
1893 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1894 socklen_t addrlen)
1895 {
1896 void *addr;
1897 abi_long ret;
1898
1899 if ((int)addrlen < 0) {
1900 return -TARGET_EINVAL;
1901 }
1902
1903 addr = alloca(addrlen);
1904
1905 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1906 if (ret)
1907 return ret;
1908
1909 return get_errno(connect(sockfd, addr, addrlen));
1910 }
1911
1912 /* do_sendrecvmsg() Must return target values and target errnos. */
1913 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1914 int flags, int send)
1915 {
1916 abi_long ret, len;
1917 struct target_msghdr *msgp;
1918 struct msghdr msg;
1919 int count;
1920 struct iovec *vec;
1921 abi_ulong target_vec;
1922
1923 /* FIXME */
1924 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1925 msgp,
1926 target_msg,
1927 send ? 1 : 0))
1928 return -TARGET_EFAULT;
1929 if (msgp->msg_name) {
1930 msg.msg_namelen = tswap32(msgp->msg_namelen);
1931 msg.msg_name = alloca(msg.msg_namelen);
1932 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1933 msg.msg_namelen);
1934 if (ret) {
1935 goto out2;
1936 }
1937 } else {
1938 msg.msg_name = NULL;
1939 msg.msg_namelen = 0;
1940 }
1941 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1942 msg.msg_control = alloca(msg.msg_controllen);
1943 msg.msg_flags = tswap32(msgp->msg_flags);
1944
1945 count = tswapal(msgp->msg_iovlen);
1946 target_vec = tswapal(msgp->msg_iov);
1947 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1948 target_vec, count, send);
1949 if (vec == NULL) {
1950 ret = -host_to_target_errno(errno);
1951 goto out2;
1952 }
1953 msg.msg_iovlen = count;
1954 msg.msg_iov = vec;
1955
1956 if (send) {
1957 ret = target_to_host_cmsg(&msg, msgp);
1958 if (ret == 0)
1959 ret = get_errno(sendmsg(fd, &msg, flags));
1960 } else {
1961 ret = get_errno(recvmsg(fd, &msg, flags));
1962 if (!is_error(ret)) {
1963 len = ret;
1964 ret = host_to_target_cmsg(msgp, &msg);
1965 if (!is_error(ret)) {
1966 msgp->msg_namelen = tswap32(msg.msg_namelen);
1967 if (msg.msg_name != NULL) {
1968 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1969 msg.msg_name, msg.msg_namelen);
1970 if (ret) {
1971 goto out;
1972 }
1973 }
1974
1975 ret = len;
1976 }
1977 }
1978 }
1979
1980 out:
1981 unlock_iovec(vec, target_vec, count, !send);
1982 out2:
1983 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1984 return ret;
1985 }
1986
1987 /* do_accept() Must return target values and target errnos. */
1988 static abi_long do_accept(int fd, abi_ulong target_addr,
1989 abi_ulong target_addrlen_addr)
1990 {
1991 socklen_t addrlen;
1992 void *addr;
1993 abi_long ret;
1994
1995 if (target_addr == 0)
1996 return get_errno(accept(fd, NULL, NULL));
1997
1998 /* Linux returns EINVAL if the addrlen pointer is invalid */
1999 if (get_user_u32(addrlen, target_addrlen_addr))
2000 return -TARGET_EINVAL;
2001
2002 if ((int)addrlen < 0) {
2003 return -TARGET_EINVAL;
2004 }
2005
2006 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2007 return -TARGET_EINVAL;
2008
2009 addr = alloca(addrlen);
2010
2011 ret = get_errno(accept(fd, addr, &addrlen));
2012 if (!is_error(ret)) {
2013 host_to_target_sockaddr(target_addr, addr, addrlen);
2014 if (put_user_u32(addrlen, target_addrlen_addr))
2015 ret = -TARGET_EFAULT;
2016 }
2017 return ret;
2018 }
2019
2020 /* do_getpeername() Must return target values and target errnos. */
2021 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2022 abi_ulong target_addrlen_addr)
2023 {
2024 socklen_t addrlen;
2025 void *addr;
2026 abi_long ret;
2027
2028 if (get_user_u32(addrlen, target_addrlen_addr))
2029 return -TARGET_EFAULT;
2030
2031 if ((int)addrlen < 0) {
2032 return -TARGET_EINVAL;
2033 }
2034
2035 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2036 return -TARGET_EFAULT;
2037
2038 addr = alloca(addrlen);
2039
2040 ret = get_errno(getpeername(fd, addr, &addrlen));
2041 if (!is_error(ret)) {
2042 host_to_target_sockaddr(target_addr, addr, addrlen);
2043 if (put_user_u32(addrlen, target_addrlen_addr))
2044 ret = -TARGET_EFAULT;
2045 }
2046 return ret;
2047 }
2048
2049 /* do_getsockname() Must return target values and target errnos. */
2050 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2051 abi_ulong target_addrlen_addr)
2052 {
2053 socklen_t addrlen;
2054 void *addr;
2055 abi_long ret;
2056
2057 if (get_user_u32(addrlen, target_addrlen_addr))
2058 return -TARGET_EFAULT;
2059
2060 if ((int)addrlen < 0) {
2061 return -TARGET_EINVAL;
2062 }
2063
2064 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2065 return -TARGET_EFAULT;
2066
2067 addr = alloca(addrlen);
2068
2069 ret = get_errno(getsockname(fd, addr, &addrlen));
2070 if (!is_error(ret)) {
2071 host_to_target_sockaddr(target_addr, addr, addrlen);
2072 if (put_user_u32(addrlen, target_addrlen_addr))
2073 ret = -TARGET_EFAULT;
2074 }
2075 return ret;
2076 }
2077
2078 /* do_socketpair() Must return target values and target errnos. */
2079 static abi_long do_socketpair(int domain, int type, int protocol,
2080 abi_ulong target_tab_addr)
2081 {
2082 int tab[2];
2083 abi_long ret;
2084
2085 ret = get_errno(socketpair(domain, type, protocol, tab));
2086 if (!is_error(ret)) {
2087 if (put_user_s32(tab[0], target_tab_addr)
2088 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2089 ret = -TARGET_EFAULT;
2090 }
2091 return ret;
2092 }
2093
2094 /* do_sendto() Must return target values and target errnos. */
2095 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2096 abi_ulong target_addr, socklen_t addrlen)
2097 {
2098 void *addr;
2099 void *host_msg;
2100 abi_long ret;
2101
2102 if ((int)addrlen < 0) {
2103 return -TARGET_EINVAL;
2104 }
2105
2106 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2107 if (!host_msg)
2108 return -TARGET_EFAULT;
2109 if (target_addr) {
2110 addr = alloca(addrlen);
2111 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2112 if (ret) {
2113 unlock_user(host_msg, msg, 0);
2114 return ret;
2115 }
2116 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2117 } else {
2118 ret = get_errno(send(fd, host_msg, len, flags));
2119 }
2120 unlock_user(host_msg, msg, 0);
2121 return ret;
2122 }
2123
2124 /* do_recvfrom() Must return target values and target errnos. */
2125 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2126 abi_ulong target_addr,
2127 abi_ulong target_addrlen)
2128 {
2129 socklen_t addrlen;
2130 void *addr;
2131 void *host_msg;
2132 abi_long ret;
2133
2134 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2135 if (!host_msg)
2136 return -TARGET_EFAULT;
2137 if (target_addr) {
2138 if (get_user_u32(addrlen, target_addrlen)) {
2139 ret = -TARGET_EFAULT;
2140 goto fail;
2141 }
2142 if ((int)addrlen < 0) {
2143 ret = -TARGET_EINVAL;
2144 goto fail;
2145 }
2146 addr = alloca(addrlen);
2147 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2148 } else {
2149 addr = NULL; /* To keep compiler quiet. */
2150 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2151 }
2152 if (!is_error(ret)) {
2153 if (target_addr) {
2154 host_to_target_sockaddr(target_addr, addr, addrlen);
2155 if (put_user_u32(addrlen, target_addrlen)) {
2156 ret = -TARGET_EFAULT;
2157 goto fail;
2158 }
2159 }
2160 unlock_user(host_msg, msg, len);
2161 } else {
2162 fail:
2163 unlock_user(host_msg, msg, 0);
2164 }
2165 return ret;
2166 }
2167
2168 #ifdef TARGET_NR_socketcall
2169 /* do_socketcall() Must return target values and target errnos. */
2170 static abi_long do_socketcall(int num, abi_ulong vptr)
2171 {
2172 abi_long ret;
2173 const int n = sizeof(abi_ulong);
2174
2175 switch(num) {
2176 case SOCKOP_socket:
2177 {
2178 abi_ulong domain, type, protocol;
2179
2180 if (get_user_ual(domain, vptr)
2181 || get_user_ual(type, vptr + n)
2182 || get_user_ual(protocol, vptr + 2 * n))
2183 return -TARGET_EFAULT;
2184
2185 ret = do_socket(domain, type, protocol);
2186 }
2187 break;
2188 case SOCKOP_bind:
2189 {
2190 abi_ulong sockfd;
2191 abi_ulong target_addr;
2192 socklen_t addrlen;
2193
2194 if (get_user_ual(sockfd, vptr)
2195 || get_user_ual(target_addr, vptr + n)
2196 || get_user_ual(addrlen, vptr + 2 * n))
2197 return -TARGET_EFAULT;
2198
2199 ret = do_bind(sockfd, target_addr, addrlen);
2200 }
2201 break;
2202 case SOCKOP_connect:
2203 {
2204 abi_ulong sockfd;
2205 abi_ulong target_addr;
2206 socklen_t addrlen;
2207
2208 if (get_user_ual(sockfd, vptr)
2209 || get_user_ual(target_addr, vptr + n)
2210 || get_user_ual(addrlen, vptr + 2 * n))
2211 return -TARGET_EFAULT;
2212
2213 ret = do_connect(sockfd, target_addr, addrlen);
2214 }
2215 break;
2216 case SOCKOP_listen:
2217 {
2218 abi_ulong sockfd, backlog;
2219
2220 if (get_user_ual(sockfd, vptr)
2221 || get_user_ual(backlog, vptr + n))
2222 return -TARGET_EFAULT;
2223
2224 ret = get_errno(listen(sockfd, backlog));
2225 }
2226 break;
2227 case SOCKOP_accept:
2228 {
2229 abi_ulong sockfd;
2230 abi_ulong target_addr, target_addrlen;
2231
2232 if (get_user_ual(sockfd, vptr)
2233 || get_user_ual(target_addr, vptr + n)
2234 || get_user_ual(target_addrlen, vptr + 2 * n))
2235 return -TARGET_EFAULT;
2236
2237 ret = do_accept(sockfd, target_addr, target_addrlen);
2238 }
2239 break;
2240 case SOCKOP_getsockname:
2241 {
2242 abi_ulong sockfd;
2243 abi_ulong target_addr, target_addrlen;
2244
2245 if (get_user_ual(sockfd, vptr)
2246 || get_user_ual(target_addr, vptr + n)
2247 || get_user_ual(target_addrlen, vptr + 2 * n))
2248 return -TARGET_EFAULT;
2249
2250 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2251 }
2252 break;
2253 case SOCKOP_getpeername:
2254 {
2255 abi_ulong sockfd;
2256 abi_ulong target_addr, target_addrlen;
2257
2258 if (get_user_ual(sockfd, vptr)
2259 || get_user_ual(target_addr, vptr + n)
2260 || get_user_ual(target_addrlen, vptr + 2 * n))
2261 return -TARGET_EFAULT;
2262
2263 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2264 }
2265 break;
2266 case SOCKOP_socketpair:
2267 {
2268 abi_ulong domain, type, protocol;
2269 abi_ulong tab;
2270
2271 if (get_user_ual(domain, vptr)
2272 || get_user_ual(type, vptr + n)
2273 || get_user_ual(protocol, vptr + 2 * n)
2274 || get_user_ual(tab, vptr + 3 * n))
2275 return -TARGET_EFAULT;
2276
2277 ret = do_socketpair(domain, type, protocol, tab);
2278 }
2279 break;
2280 case SOCKOP_send:
2281 {
2282 abi_ulong sockfd;
2283 abi_ulong msg;
2284 size_t len;
2285 abi_ulong flags;
2286
2287 if (get_user_ual(sockfd, vptr)
2288 || get_user_ual(msg, vptr + n)
2289 || get_user_ual(len, vptr + 2 * n)
2290 || get_user_ual(flags, vptr + 3 * n))
2291 return -TARGET_EFAULT;
2292
2293 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2294 }
2295 break;
2296 case SOCKOP_recv:
2297 {
2298 abi_ulong sockfd;
2299 abi_ulong msg;
2300 size_t len;
2301 abi_ulong flags;
2302
2303 if (get_user_ual(sockfd, vptr)
2304 || get_user_ual(msg, vptr + n)
2305 || get_user_ual(len, vptr + 2 * n)
2306 || get_user_ual(flags, vptr + 3 * n))
2307 return -TARGET_EFAULT;
2308
2309 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2310 }
2311 break;
2312 case SOCKOP_sendto:
2313 {
2314 abi_ulong sockfd;
2315 abi_ulong msg;
2316 size_t len;
2317 abi_ulong flags;
2318 abi_ulong addr;
2319 socklen_t addrlen;
2320
2321 if (get_user_ual(sockfd, vptr)
2322 || get_user_ual(msg, vptr + n)
2323 || get_user_ual(len, vptr + 2 * n)
2324 || get_user_ual(flags, vptr + 3 * n)
2325 || get_user_ual(addr, vptr + 4 * n)
2326 || get_user_ual(addrlen, vptr + 5 * n))
2327 return -TARGET_EFAULT;
2328
2329 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2330 }
2331 break;
2332 case SOCKOP_recvfrom:
2333 {
2334 abi_ulong sockfd;
2335 abi_ulong msg;
2336 size_t len;
2337 abi_ulong flags;
2338 abi_ulong addr;
2339 socklen_t addrlen;
2340
2341 if (get_user_ual(sockfd, vptr)
2342 || get_user_ual(msg, vptr + n)
2343 || get_user_ual(len, vptr + 2 * n)
2344 || get_user_ual(flags, vptr + 3 * n)
2345 || get_user_ual(addr, vptr + 4 * n)
2346 || get_user_ual(addrlen, vptr + 5 * n))
2347 return -TARGET_EFAULT;
2348
2349 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2350 }
2351 break;
2352 case SOCKOP_shutdown:
2353 {
2354 abi_ulong sockfd, how;
2355
2356 if (get_user_ual(sockfd, vptr)
2357 || get_user_ual(how, vptr + n))
2358 return -TARGET_EFAULT;
2359
2360 ret = get_errno(shutdown(sockfd, how));
2361 }
2362 break;
2363 case SOCKOP_sendmsg:
2364 case SOCKOP_recvmsg:
2365 {
2366 abi_ulong fd;
2367 abi_ulong target_msg;
2368 abi_ulong flags;
2369
2370 if (get_user_ual(fd, vptr)
2371 || get_user_ual(target_msg, vptr + n)
2372 || get_user_ual(flags, vptr + 2 * n))
2373 return -TARGET_EFAULT;
2374
2375 ret = do_sendrecvmsg(fd, target_msg, flags,
2376 (num == SOCKOP_sendmsg));
2377 }
2378 break;
2379 case SOCKOP_setsockopt:
2380 {
2381 abi_ulong sockfd;
2382 abi_ulong level;
2383 abi_ulong optname;
2384 abi_ulong optval;
2385 socklen_t optlen;
2386
2387 if (get_user_ual(sockfd, vptr)
2388 || get_user_ual(level, vptr + n)
2389 || get_user_ual(optname, vptr + 2 * n)
2390 || get_user_ual(optval, vptr + 3 * n)
2391 || get_user_ual(optlen, vptr + 4 * n))
2392 return -TARGET_EFAULT;
2393
2394 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2395 }
2396 break;
2397 case SOCKOP_getsockopt:
2398 {
2399 abi_ulong sockfd;
2400 abi_ulong level;
2401 abi_ulong optname;
2402 abi_ulong optval;
2403 socklen_t optlen;
2404
2405 if (get_user_ual(sockfd, vptr)
2406 || get_user_ual(level, vptr + n)
2407 || get_user_ual(optname, vptr + 2 * n)
2408 || get_user_ual(optval, vptr + 3 * n)
2409 || get_user_ual(optlen, vptr + 4 * n))
2410 return -TARGET_EFAULT;
2411
2412 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2413 }
2414 break;
2415 default:
2416 gemu_log("Unsupported socketcall: %d\n", num);
2417 ret = -TARGET_ENOSYS;
2418 break;
2419 }
2420 return ret;
2421 }
2422 #endif
2423
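/* do_socketcall() above expects vptr to point at an array of abi_ulong
 * arguments packed by the guest C library.  A minimal guest-side sketch
 * for SOCKOP_socket (assuming an ABI that routes socket() through
 * socketcall, such as 32-bit x86):
 *
 *   abi_ulong args[3] = { domain, type, protocol };
 *   socketcall(SOCKOP_socket, args);
 *
 * which is why each argument is fetched above with get_user_ual() at
 * successive offsets of sizeof(abi_ulong).
 */
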
2424 #define N_SHM_REGIONS 32
2425
2426 static struct shm_region {
2427 abi_ulong start;
2428 abi_ulong size;
2429 } shm_regions[N_SHM_REGIONS];
2430
2431 struct target_ipc_perm
2432 {
2433 abi_long __key;
2434 abi_ulong uid;
2435 abi_ulong gid;
2436 abi_ulong cuid;
2437 abi_ulong cgid;
2438 unsigned short int mode;
2439 unsigned short int __pad1;
2440 unsigned short int __seq;
2441 unsigned short int __pad2;
2442 abi_ulong __unused1;
2443 abi_ulong __unused2;
2444 };
2445
2446 struct target_semid_ds
2447 {
2448 struct target_ipc_perm sem_perm;
2449 abi_ulong sem_otime;
2450 abi_ulong __unused1;
2451 abi_ulong sem_ctime;
2452 abi_ulong __unused2;
2453 abi_ulong sem_nsems;
2454 abi_ulong __unused3;
2455 abi_ulong __unused4;
2456 };
2457
2458 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2459 abi_ulong target_addr)
2460 {
2461 struct target_ipc_perm *target_ip;
2462 struct target_semid_ds *target_sd;
2463
2464 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2465 return -TARGET_EFAULT;
2466 target_ip = &(target_sd->sem_perm);
2467 host_ip->__key = tswapal(target_ip->__key);
2468 host_ip->uid = tswapal(target_ip->uid);
2469 host_ip->gid = tswapal(target_ip->gid);
2470 host_ip->cuid = tswapal(target_ip->cuid);
2471 host_ip->cgid = tswapal(target_ip->cgid);
2472 host_ip->mode = tswap16(target_ip->mode);
2473 unlock_user_struct(target_sd, target_addr, 0);
2474 return 0;
2475 }
2476
2477 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2478 struct ipc_perm *host_ip)
2479 {
2480 struct target_ipc_perm *target_ip;
2481 struct target_semid_ds *target_sd;
2482
2483 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2484 return -TARGET_EFAULT;
2485 target_ip = &(target_sd->sem_perm);
2486 target_ip->__key = tswapal(host_ip->__key);
2487 target_ip->uid = tswapal(host_ip->uid);
2488 target_ip->gid = tswapal(host_ip->gid);
2489 target_ip->cuid = tswapal(host_ip->cuid);
2490 target_ip->cgid = tswapal(host_ip->cgid);
2491 target_ip->mode = tswap16(host_ip->mode);
2492 unlock_user_struct(target_sd, target_addr, 1);
2493 return 0;
2494 }
2495
2496 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2497 abi_ulong target_addr)
2498 {
2499 struct target_semid_ds *target_sd;
2500
2501 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2502 return -TARGET_EFAULT;
2503 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2504 return -TARGET_EFAULT;
2505 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2506 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2507 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2508 unlock_user_struct(target_sd, target_addr, 0);
2509 return 0;
2510 }
2511
2512 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2513 struct semid_ds *host_sd)
2514 {
2515 struct target_semid_ds *target_sd;
2516
2517 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2518 return -TARGET_EFAULT;
2519 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2520 return -TARGET_EFAULT;
2521 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2522 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2523 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2524 unlock_user_struct(target_sd, target_addr, 1);
2525 return 0;
2526 }
2527
2528 struct target_seminfo {
2529 int semmap;
2530 int semmni;
2531 int semmns;
2532 int semmnu;
2533 int semmsl;
2534 int semopm;
2535 int semume;
2536 int semusz;
2537 int semvmx;
2538 int semaem;
2539 };
2540
2541 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2542 struct seminfo *host_seminfo)
2543 {
2544 struct target_seminfo *target_seminfo;
2545 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2546 return -TARGET_EFAULT;
2547 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2548 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2549 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2550 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2551 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2552 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2553 __put_user(host_seminfo->semume, &target_seminfo->semume);
2554 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2555 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2556 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2557 unlock_user_struct(target_seminfo, target_addr, 1);
2558 return 0;
2559 }
2560
2561 union semun {
2562 int val;
2563 struct semid_ds *buf;
2564 unsigned short *array;
2565 struct seminfo *__buf;
2566 };
2567
2568 union target_semun {
2569 int val;
2570 abi_ulong buf;
2571 abi_ulong array;
2572 abi_ulong __buf;
2573 };
2574
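/* In target_semun above, buf, array and __buf hold guest addresses
 * (abi_ulong), not host pointers, so do_semctl() below must pick the
 * member matching cmd and translate it: GETALL/SETALL go through
 * target_to_host_semarray()/host_to_target_semarray(), while IPC_STAT,
 * IPC_SET and SEM_STAT use the semid_ds converters.
 */
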
2575 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2576 abi_ulong target_addr)
2577 {
2578 int nsems;
2579 unsigned short *array;
2580 union semun semun;
2581 struct semid_ds semid_ds;
2582 int i, ret;
2583
2584 semun.buf = &semid_ds;
2585
2586 ret = semctl(semid, 0, IPC_STAT, semun);
2587 if (ret == -1)
2588 return get_errno(ret);
2589
2590 nsems = semid_ds.sem_nsems;
2591
2592 *host_array = malloc(nsems*sizeof(unsigned short));
2593 array = lock_user(VERIFY_READ, target_addr,
2594 nsems*sizeof(unsigned short), 1);
2595 if (!array)
2596 return -TARGET_EFAULT;
2597
2598 for(i=0; i<nsems; i++) {
2599 __get_user((*host_array)[i], &array[i]);
2600 }
2601 unlock_user(array, target_addr, 0);
2602
2603 return 0;
2604 }
2605
2606 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2607 unsigned short **host_array)
2608 {
2609 int nsems;
2610 unsigned short *array;
2611 union semun semun;
2612 struct semid_ds semid_ds;
2613 int i, ret;
2614
2615 semun.buf = &semid_ds;
2616
2617 ret = semctl(semid, 0, IPC_STAT, semun);
2618 if (ret == -1)
2619 return get_errno(ret);
2620
2621 nsems = semid_ds.sem_nsems;
2622
2623 array = lock_user(VERIFY_WRITE, target_addr,
2624 nsems*sizeof(unsigned short), 0);
2625 if (!array)
2626 return -TARGET_EFAULT;
2627
2628 for(i=0; i<nsems; i++) {
2629 __put_user((*host_array)[i], &array[i]);
2630 }
2631 free(*host_array);
2632 unlock_user(array, target_addr, 1);
2633
2634 return 0;
2635 }
2636
2637 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2638 union target_semun target_su)
2639 {
2640 union semun arg;
2641 struct semid_ds dsarg;
2642 unsigned short *array = NULL;
2643 struct seminfo seminfo;
2644 abi_long ret = -TARGET_EINVAL;
2645 abi_long err;
2646 cmd &= 0xff;
2647
2648 switch( cmd ) {
2649 case GETVAL:
2650 case SETVAL:
2651 arg.val = tswap32(target_su.val);
2652 ret = get_errno(semctl(semid, semnum, cmd, arg));
2653 target_su.val = tswap32(arg.val);
2654 break;
2655 case GETALL:
2656 case SETALL:
2657 err = target_to_host_semarray(semid, &array, target_su.array);
2658 if (err)
2659 return err;
2660 arg.array = array;
2661 ret = get_errno(semctl(semid, semnum, cmd, arg));
2662 err = host_to_target_semarray(semid, target_su.array, &array);
2663 if (err)
2664 return err;
2665 break;
2666 case IPC_STAT:
2667 case IPC_SET:
2668 case SEM_STAT:
2669 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2670 if (err)
2671 return err;
2672 arg.buf = &dsarg;
2673 ret = get_errno(semctl(semid, semnum, cmd, arg));
2674 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2675 if (err)
2676 return err;
2677 break;
2678 case IPC_INFO:
2679 case SEM_INFO:
2680 arg.__buf = &seminfo;
2681 ret = get_errno(semctl(semid, semnum, cmd, arg));
2682 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2683 if (err)
2684 return err;
2685 break;
2686 case IPC_RMID:
2687 case GETPID:
2688 case GETNCNT:
2689 case GETZCNT:
2690 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2691 break;
2692 }
2693
2694 return ret;
2695 }
2696
2697 struct target_sembuf {
2698 unsigned short sem_num;
2699 short sem_op;
2700 short sem_flg;
2701 };
2702
2703 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2704 abi_ulong target_addr,
2705 unsigned nsops)
2706 {
2707 struct target_sembuf *target_sembuf;
2708 int i;
2709
2710 target_sembuf = lock_user(VERIFY_READ, target_addr,
2711 nsops*sizeof(struct target_sembuf), 1);
2712 if (!target_sembuf)
2713 return -TARGET_EFAULT;
2714
2715 for(i=0; i<nsops; i++) {
2716 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2717 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2718 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2719 }
2720
2721 unlock_user(target_sembuf, target_addr, 0);
2722
2723 return 0;
2724 }
2725
2726 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2727 {
2728 struct sembuf sops[nsops];
2729
2730 if (target_to_host_sembuf(sops, ptr, nsops))
2731 return -TARGET_EFAULT;
2732
2733 return semop(semid, sops, nsops);
2734 }
2735
2736 struct target_msqid_ds
2737 {
2738 struct target_ipc_perm msg_perm;
2739 abi_ulong msg_stime;
2740 #if TARGET_ABI_BITS == 32
2741 abi_ulong __unused1;
2742 #endif
2743 abi_ulong msg_rtime;
2744 #if TARGET_ABI_BITS == 32
2745 abi_ulong __unused2;
2746 #endif
2747 abi_ulong msg_ctime;
2748 #if TARGET_ABI_BITS == 32
2749 abi_ulong __unused3;
2750 #endif
2751 abi_ulong __msg_cbytes;
2752 abi_ulong msg_qnum;
2753 abi_ulong msg_qbytes;
2754 abi_ulong msg_lspid;
2755 abi_ulong msg_lrpid;
2756 abi_ulong __unused4;
2757 abi_ulong __unused5;
2758 };
2759
2760 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2761 abi_ulong target_addr)
2762 {
2763 struct target_msqid_ds *target_md;
2764
2765 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2766 return -TARGET_EFAULT;
2767 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2768 return -TARGET_EFAULT;
2769 host_md->msg_stime = tswapal(target_md->msg_stime);
2770 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2771 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2772 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2773 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2774 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2775 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2776 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2777 unlock_user_struct(target_md, target_addr, 0);
2778 return 0;
2779 }
2780
2781 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2782 struct msqid_ds *host_md)
2783 {
2784 struct target_msqid_ds *target_md;
2785
2786 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2787 return -TARGET_EFAULT;
2788 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2789 return -TARGET_EFAULT;
2790 target_md->msg_stime = tswapal(host_md->msg_stime);
2791 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2792 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2793 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2794 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2795 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2796 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2797 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2798 unlock_user_struct(target_md, target_addr, 1);
2799 return 0;
2800 }
2801
2802 struct target_msginfo {
2803 int msgpool;
2804 int msgmap;
2805 int msgmax;
2806 int msgmnb;
2807 int msgmni;
2808 int msgssz;
2809 int msgtql;
2810 unsigned short int msgseg;
2811 };
2812
2813 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2814 struct msginfo *host_msginfo)
2815 {
2816 struct target_msginfo *target_msginfo;
2817 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2818 return -TARGET_EFAULT;
2819 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2820 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2821 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2822 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2823 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2824 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2825 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2826 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2827 unlock_user_struct(target_msginfo, target_addr, 1);
2828 return 0;
2829 }
2830
2831 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2832 {
2833 struct msqid_ds dsarg;
2834 struct msginfo msginfo;
2835 abi_long ret = -TARGET_EINVAL;
2836
2837 cmd &= 0xff;
2838
2839 switch (cmd) {
2840 case IPC_STAT:
2841 case IPC_SET:
2842 case MSG_STAT:
2843 if (target_to_host_msqid_ds(&dsarg,ptr))
2844 return -TARGET_EFAULT;
2845 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2846 if (host_to_target_msqid_ds(ptr,&dsarg))
2847 return -TARGET_EFAULT;
2848 break;
2849 case IPC_RMID:
2850 ret = get_errno(msgctl(msgid, cmd, NULL));
2851 break;
2852 case IPC_INFO:
2853 case MSG_INFO:
2854 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2855 if (host_to_target_msginfo(ptr, &msginfo))
2856 return -TARGET_EFAULT;
2857 break;
2858 }
2859
2860 return ret;
2861 }
2862
2863 struct target_msgbuf {
2864 abi_long mtype;
2865 char mtext[1];
2866 };
2867
2868 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2869 unsigned int msgsz, int msgflg)
2870 {
2871 struct target_msgbuf *target_mb;
2872 struct msgbuf *host_mb;
2873 abi_long ret = 0;
2874
2875 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2876 return -TARGET_EFAULT;
2877 host_mb = malloc(msgsz+sizeof(long));
2878 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2879 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2880 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2881 free(host_mb);
2882 unlock_user_struct(target_mb, msgp, 0);
2883
2884 return ret;
2885 }
2886
2887 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2888 unsigned int msgsz, abi_long msgtyp,
2889 int msgflg)
2890 {
2891 struct target_msgbuf *target_mb;
2892 char *target_mtext;
2893 struct msgbuf *host_mb;
2894 abi_long ret = 0;
2895
2896 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2897 return -TARGET_EFAULT;
2898
2899 host_mb = g_malloc(msgsz+sizeof(long));
2900 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
2901
2902 if (ret > 0) {
2903 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2904 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2905 if (!target_mtext) {
2906 ret = -TARGET_EFAULT;
2907 goto end;
2908 }
2909 memcpy(target_mb->mtext, host_mb->mtext, ret);
2910 unlock_user(target_mtext, target_mtext_addr, ret);
2911 }
2912
2913 target_mb->mtype = tswapal(host_mb->mtype);
2914
2915 end:
2916 if (target_mb)
2917 unlock_user_struct(target_mb, msgp, 1);
2918 g_free(host_mb);
2919 return ret;
2920 }
2921
2922 struct target_shmid_ds
2923 {
2924 struct target_ipc_perm shm_perm;
2925 abi_ulong shm_segsz;
2926 abi_ulong shm_atime;
2927 #if TARGET_ABI_BITS == 32
2928 abi_ulong __unused1;
2929 #endif
2930 abi_ulong shm_dtime;
2931 #if TARGET_ABI_BITS == 32
2932 abi_ulong __unused2;
2933 #endif
2934 abi_ulong shm_ctime;
2935 #if TARGET_ABI_BITS == 32
2936 abi_ulong __unused3;
2937 #endif
2938 int shm_cpid;
2939 int shm_lpid;
2940 abi_ulong shm_nattch;
2941 unsigned long int __unused4;
2942 unsigned long int __unused5;
2943 };
2944
2945 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2946 abi_ulong target_addr)
2947 {
2948 struct target_shmid_ds *target_sd;
2949
2950 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2951 return -TARGET_EFAULT;
2952 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2953 return -TARGET_EFAULT;
2954 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2955 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2956 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2957 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2958 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2959 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2960 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2961 unlock_user_struct(target_sd, target_addr, 0);
2962 return 0;
2963 }
2964
2965 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2966 struct shmid_ds *host_sd)
2967 {
2968 struct target_shmid_ds *target_sd;
2969
2970 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2971 return -TARGET_EFAULT;
2972 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2973 return -TARGET_EFAULT;
2974 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2975 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2976 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2977 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2978 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2979 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2980 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2981 unlock_user_struct(target_sd, target_addr, 1);
2982 return 0;
2983 }
2984
2985 struct target_shminfo {
2986 abi_ulong shmmax;
2987 abi_ulong shmmin;
2988 abi_ulong shmmni;
2989 abi_ulong shmseg;
2990 abi_ulong shmall;
2991 };
2992
2993 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2994 struct shminfo *host_shminfo)
2995 {
2996 struct target_shminfo *target_shminfo;
2997 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2998 return -TARGET_EFAULT;
2999 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3000 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3001 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3002 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3003 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3004 unlock_user_struct(target_shminfo, target_addr, 1);
3005 return 0;
3006 }
3007
3008 struct target_shm_info {
3009 int used_ids;
3010 abi_ulong shm_tot;
3011 abi_ulong shm_rss;
3012 abi_ulong shm_swp;
3013 abi_ulong swap_attempts;
3014 abi_ulong swap_successes;
3015 };
3016
3017 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3018 struct shm_info *host_shm_info)
3019 {
3020 struct target_shm_info *target_shm_info;
3021 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3022 return -TARGET_EFAULT;
3023 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3024 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3025 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3026 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3027 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3028 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3029 unlock_user_struct(target_shm_info, target_addr, 1);
3030 return 0;
3031 }
3032
3033 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3034 {
3035 struct shmid_ds dsarg;
3036 struct shminfo shminfo;
3037 struct shm_info shm_info;
3038 abi_long ret = -TARGET_EINVAL;
3039
3040 cmd &= 0xff;
3041
3042 switch(cmd) {
3043 case IPC_STAT:
3044 case IPC_SET:
3045 case SHM_STAT:
3046 if (target_to_host_shmid_ds(&dsarg, buf))
3047 return -TARGET_EFAULT;
3048 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3049 if (host_to_target_shmid_ds(buf, &dsarg))
3050 return -TARGET_EFAULT;
3051 break;
3052 case IPC_INFO:
3053 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3054 if (host_to_target_shminfo(buf, &shminfo))
3055 return -TARGET_EFAULT;
3056 break;
3057 case SHM_INFO:
3058 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3059 if (host_to_target_shm_info(buf, &shm_info))
3060 return -TARGET_EFAULT;
3061 break;
3062 case IPC_RMID:
3063 case SHM_LOCK:
3064 case SHM_UNLOCK:
3065 ret = get_errno(shmctl(shmid, cmd, NULL));
3066 break;
3067 }
3068
3069 return ret;
3070 }
3071
3072 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3073 {
3074 abi_long raddr;
3075 void *host_raddr;
3076 struct shmid_ds shm_info;
3077 int i,ret;
3078
3079 /* find out the length of the shared memory segment */
3080 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3081 if (is_error(ret)) {
3082 /* can't get length, bail out */
3083 return ret;
3084 }
3085
3086 mmap_lock();
3087
3088 if (shmaddr)
3089 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3090 else {
3091 abi_ulong mmap_start;
3092
3093 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3094
3095 if (mmap_start == -1) {
3096 errno = ENOMEM;
3097 host_raddr = (void *)-1;
3098 } else
3099 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3100 }
3101
3102 if (host_raddr == (void *)-1) {
3103 mmap_unlock();
3104 return get_errno((long)host_raddr);
3105 }
3106 raddr=h2g((unsigned long)host_raddr);
3107
3108 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3109 PAGE_VALID | PAGE_READ |
3110 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3111
3112 for (i = 0; i < N_SHM_REGIONS; i++) {
3113 if (shm_regions[i].start == 0) {
3114 shm_regions[i].start = raddr;
3115 shm_regions[i].size = shm_info.shm_segsz;
3116 break;
3117 }
3118 }
3119
3120 mmap_unlock();
3121 return raddr;
3122
3123 }
3124
3125 static inline abi_long do_shmdt(abi_ulong shmaddr)
3126 {
3127 int i;
3128
3129 for (i = 0; i < N_SHM_REGIONS; ++i) {
3130 if (shm_regions[i].start == shmaddr) {
3131 shm_regions[i].start = 0;
3132 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3133 break;
3134 }
3135 }
3136
3137 return get_errno(shmdt(g2h(shmaddr)));
3138 }
3139
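/* do_shmat() records each attachment's guest address and size in
 * shm_regions[] because shmdt() by itself does not report the segment
 * size; do_shmdt() then uses that record to clear the page flags for
 * the whole [shmaddr, shmaddr + size) range before detaching the host
 * mapping with shmdt(g2h(shmaddr)).
 */
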
3140 #ifdef TARGET_NR_ipc
3141 /* ??? This only works with linear mappings. */
3142 /* do_ipc() must return target values and target errnos. */
3143 static abi_long do_ipc(unsigned int call, int first,
3144 int second, int third,
3145 abi_long ptr, abi_long fifth)
3146 {
3147 int version;
3148 abi_long ret = 0;
3149
3150 version = call >> 16;
3151 call &= 0xffff;
3152
3153 switch (call) {
3154 case IPCOP_semop:
3155 ret = do_semop(first, ptr, second);
3156 break;
3157
3158 case IPCOP_semget:
3159 ret = get_errno(semget(first, second, third));
3160 break;
3161
3162 case IPCOP_semctl:
3163 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3164 break;
3165
3166 case IPCOP_msgget:
3167 ret = get_errno(msgget(first, second));
3168 break;
3169
3170 case IPCOP_msgsnd:
3171 ret = do_msgsnd(first, ptr, second, third);
3172 break;
3173
3174 case IPCOP_msgctl:
3175 ret = do_msgctl(first, second, ptr);
3176 break;
3177
3178 case IPCOP_msgrcv:
3179 switch (version) {
3180 case 0:
3181 {
3182 struct target_ipc_kludge {
3183 abi_long msgp;
3184 abi_long msgtyp;
3185 } *tmp;
3186
3187 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3188 ret = -TARGET_EFAULT;
3189 break;
3190 }
3191
3192 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3193
3194 unlock_user_struct(tmp, ptr, 0);
3195 break;
3196 }
3197 default:
3198 ret = do_msgrcv(first, ptr, second, fifth, third);
3199 }
3200 break;
3201
3202 case IPCOP_shmat:
3203 switch (version) {
3204 default:
3205 {
3206 abi_ulong raddr;
3207 raddr = do_shmat(first, ptr, second);
3208 if (is_error(raddr))
3209 return get_errno(raddr);
3210 if (put_user_ual(raddr, third))
3211 return -TARGET_EFAULT;
3212 break;
3213 }
3214 case 1:
3215 ret = -TARGET_EINVAL;
3216 break;
3217 }
3218 break;
3219 case IPCOP_shmdt:
3220 ret = do_shmdt(ptr);
3221 break;
3222
3223 case IPCOP_shmget:
3224 /* IPC_* flag values are the same on all linux platforms */
3225 ret = get_errno(shmget(first, second, third));
3226 break;
3227
3228 /* IPC_* and SHM_* command values are the same on all linux platforms */
3229 case IPCOP_shmctl:
3230 ret = do_shmctl(first, second, third);
3231 break;
3232 default:
3233 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3234 ret = -TARGET_ENOSYS;
3235 break;
3236 }
3237 return ret;
3238 }
3239 #endif
3240
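/* For the guest, sys_ipc() multiplexes all SysV IPC operations: the low
 * 16 bits of 'call' select the IPCOP_* operation and the high 16 bits
 * carry a version number, which do_ipc() splits with
 * "version = call >> 16; call &= 0xffff;".  For example, a shmat()
 * routed through ipc() passes the address where the result should be
 * stored in 'third', which is why IPCOP_shmat writes raddr back with
 * put_user_ual(raddr, third).
 */
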
3241 /* kernel structure types definitions */
3242
3243 #define STRUCT(name, ...) STRUCT_ ## name,
3244 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3245 enum {
3246 #include "syscall_types.h"
3247 };
3248 #undef STRUCT
3249 #undef STRUCT_SPECIAL
3250
3251 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3252 #define STRUCT_SPECIAL(name)
3253 #include "syscall_types.h"
3254 #undef STRUCT
3255 #undef STRUCT_SPECIAL
3256
3257 typedef struct IOCTLEntry IOCTLEntry;
3258
3259 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3260 int fd, abi_long cmd, abi_long arg);
3261
3262 struct IOCTLEntry {
3263 unsigned int target_cmd;
3264 unsigned int host_cmd;
3265 const char *name;
3266 int access;
3267 do_ioctl_fn *do_ioctl;
3268 const argtype arg_type[5];
3269 };
3270
3271 #define IOC_R 0x0001
3272 #define IOC_W 0x0002
3273 #define IOC_RW (IOC_R | IOC_W)
3274
3275 #define MAX_STRUCT_SIZE 4096
3276
3277 #ifdef CONFIG_FIEMAP
3278 /* So fiemap access checks don't overflow on 32 bit systems.
3279 * This is very slightly smaller than the limit imposed by
3280 * the underlying kernel.
3281 */
3282 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3283 / sizeof(struct fiemap_extent))
3284
3285 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3286 int fd, abi_long cmd, abi_long arg)
3287 {
3288 /* The parameter for this ioctl is a struct fiemap followed
3289 * by an array of struct fiemap_extent whose size is set
3290 * in fiemap->fm_extent_count. The array is filled in by the
3291 * ioctl.
3292 */
3293 int target_size_in, target_size_out;
3294 struct fiemap *fm;
3295 const argtype *arg_type = ie->arg_type;
3296 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3297 void *argptr, *p;
3298 abi_long ret;
3299 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3300 uint32_t outbufsz;
3301 int free_fm = 0;
3302
3303 assert(arg_type[0] == TYPE_PTR);
3304 assert(ie->access == IOC_RW);
3305 arg_type++;
3306 target_size_in = thunk_type_size(arg_type, 0);
3307 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3308 if (!argptr) {
3309 return -TARGET_EFAULT;
3310 }
3311 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3312 unlock_user(argptr, arg, 0);
3313 fm = (struct fiemap *)buf_temp;
3314 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3315 return -TARGET_EINVAL;
3316 }
3317
3318 outbufsz = sizeof (*fm) +
3319 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3320
3321 if (outbufsz > MAX_STRUCT_SIZE) {
3322 /* We can't fit all the extents into the fixed size buffer.
3323 * Allocate one that is large enough and use it instead.
3324 */
3325 fm = malloc(outbufsz);
3326 if (!fm) {
3327 return -TARGET_ENOMEM;
3328 }
3329 memcpy(fm, buf_temp, sizeof(struct fiemap));
3330 free_fm = 1;
3331 }
3332 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3333 if (!is_error(ret)) {
3334 target_size_out = target_size_in;
3335 /* An extent_count of 0 means we were only counting the extents
3336 * so there are no structs to copy
3337 */
3338 if (fm->fm_extent_count != 0) {
3339 target_size_out += fm->fm_mapped_extents * extent_size;
3340 }
3341 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3342 if (!argptr) {
3343 ret = -TARGET_EFAULT;
3344 } else {
3345 /* Convert the struct fiemap */
3346 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3347 if (fm->fm_extent_count != 0) {
3348 p = argptr + target_size_in;
3349 /* ...and then all the struct fiemap_extents */
3350 for (i = 0; i < fm->fm_mapped_extents; i++) {
3351 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3352 THUNK_TARGET);
3353 p += extent_size;
3354 }
3355 }
3356 unlock_user(argptr, arg, target_size_out);
3357 }
3358 }
3359 if (free_fm) {
3360 free(fm);
3361 }
3362 return ret;
3363 }
3364 #endif
3365
3366 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3367 int fd, abi_long cmd, abi_long arg)
3368 {
3369 const argtype *arg_type = ie->arg_type;
3370 int target_size;
3371 void *argptr;
3372 int ret;
3373 struct ifconf *host_ifconf;
3374 uint32_t outbufsz;
3375 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3376 int target_ifreq_size;
3377 int nb_ifreq;
3378 int free_buf = 0;
3379 int i;
3380 int target_ifc_len;
3381 abi_long target_ifc_buf;
3382 int host_ifc_len;
3383 char *host_ifc_buf;
3384
3385 assert(arg_type[0] == TYPE_PTR);
3386 assert(ie->access == IOC_RW);
3387
3388 arg_type++;
3389 target_size = thunk_type_size(arg_type, 0);
3390
3391 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3392 if (!argptr)
3393 return -TARGET_EFAULT;
3394 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3395 unlock_user(argptr, arg, 0);
3396
3397 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3398 target_ifc_len = host_ifconf->ifc_len;
3399 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3400
3401 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3402 nb_ifreq = target_ifc_len / target_ifreq_size;
3403 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3404
3405 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3406 if (outbufsz > MAX_STRUCT_SIZE) {
3407 /* We can't fit all the ifreq entries into the fixed size buffer.
3408 * Allocate one that is large enough and use it instead.
3409 */
3410 host_ifconf = malloc(outbufsz);
3411 if (!host_ifconf) {
3412 return -TARGET_ENOMEM;
3413 }
3414 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3415 free_buf = 1;
3416 }
3417 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3418
3419 host_ifconf->ifc_len = host_ifc_len;
3420 host_ifconf->ifc_buf = host_ifc_buf;
3421
3422 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3423 if (!is_error(ret)) {
3424 /* convert host ifc_len to target ifc_len */
3425
3426 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3427 target_ifc_len = nb_ifreq * target_ifreq_size;
3428 host_ifconf->ifc_len = target_ifc_len;
3429
3430 /* restore target ifc_buf */
3431
3432 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3433
3434 /* copy struct ifconf to target user */
3435
3436 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3437 if (!argptr)
3438 return -TARGET_EFAULT;
3439 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3440 unlock_user(argptr, arg, target_size);
3441
3442 /* copy ifreq[] to target user */
3443
3444 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3445 for (i = 0; i < nb_ifreq ; i++) {
3446 thunk_convert(argptr + i * target_ifreq_size,
3447 host_ifc_buf + i * sizeof(struct ifreq),
3448 ifreq_arg_type, THUNK_TARGET);
3449 }
3450 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3451 }
3452
3453 if (free_buf) {
3454 free(host_ifconf);
3455 }
3456
3457 return ret;
3458 }
3459
3460 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3461 abi_long cmd, abi_long arg)
3462 {
3463 void *argptr;
3464 struct dm_ioctl *host_dm;
3465 abi_long guest_data;
3466 uint32_t guest_data_size;
3467 int target_size;
3468 const argtype *arg_type = ie->arg_type;
3469 abi_long ret;
3470 void *big_buf = NULL;
3471 char *host_data;
3472
3473 arg_type++;
3474 target_size = thunk_type_size(arg_type, 0);
3475 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3476 if (!argptr) {
3477 ret = -TARGET_EFAULT;
3478 goto out;
3479 }
3480 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3481 unlock_user(argptr, arg, 0);
3482
3483 /* buf_temp is too small, so fetch things into a bigger buffer */
3484 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3485 memcpy(big_buf, buf_temp, target_size);
3486 buf_temp = big_buf;
3487 host_dm = big_buf;
3488
3489 guest_data = arg + host_dm->data_start;
3490 if ((guest_data - arg) < 0) {
3491 ret = -EINVAL;
3492 goto out;
3493 }
3494 guest_data_size = host_dm->data_size - host_dm->data_start;
3495 host_data = (char*)host_dm + host_dm->data_start;
3496
3497 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3498 switch (ie->host_cmd) {
3499 case DM_REMOVE_ALL:
3500 case DM_LIST_DEVICES:
3501 case DM_DEV_CREATE:
3502 case DM_DEV_REMOVE:
3503 case DM_DEV_SUSPEND:
3504 case DM_DEV_STATUS:
3505 case DM_DEV_WAIT:
3506 case DM_TABLE_STATUS:
3507 case DM_TABLE_CLEAR:
3508 case DM_TABLE_DEPS:
3509 case DM_LIST_VERSIONS:
3510 /* no input data */
3511 break;
3512 case DM_DEV_RENAME:
3513 case DM_DEV_SET_GEOMETRY:
3514 /* data contains only strings */
3515 memcpy(host_data, argptr, guest_data_size);
3516 break;
3517 case DM_TARGET_MSG:
3518 memcpy(host_data, argptr, guest_data_size);
3519 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3520 break;
3521 case DM_TABLE_LOAD:
3522 {
3523 void *gspec = argptr;
3524 void *cur_data = host_data;
3525 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3526 int spec_size = thunk_type_size(arg_type, 0);
3527 int i;
3528
3529 for (i = 0; i < host_dm->target_count; i++) {
3530 struct dm_target_spec *spec = cur_data;
3531 uint32_t next;
3532 int slen;
3533
3534 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3535 slen = strlen((char*)gspec + spec_size) + 1;
3536 next = spec->next;
3537 spec->next = sizeof(*spec) + slen;
3538 strcpy((char*)&spec[1], gspec + spec_size);
3539 gspec += next;
3540 cur_data += spec->next;
3541 }
3542 break;
3543 }
3544 default:
3545 ret = -TARGET_EINVAL;
3546 goto out;
3547 }
3548 unlock_user(argptr, guest_data, 0);
3549
3550 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3551 if (!is_error(ret)) {
3552 guest_data = arg + host_dm->data_start;
3553 guest_data_size = host_dm->data_size - host_dm->data_start;
3554 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3555 switch (ie->host_cmd) {
3556 case DM_REMOVE_ALL:
3557 case DM_DEV_CREATE:
3558 case DM_DEV_REMOVE:
3559 case DM_DEV_RENAME:
3560 case DM_DEV_SUSPEND:
3561 case DM_DEV_STATUS:
3562 case DM_TABLE_LOAD:
3563 case DM_TABLE_CLEAR:
3564 case DM_TARGET_MSG:
3565 case DM_DEV_SET_GEOMETRY:
3566 /* no return data */
3567 break;
3568 case DM_LIST_DEVICES:
3569 {
3570 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3571 uint32_t remaining_data = guest_data_size;
3572 void *cur_data = argptr;
3573 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3574 int nl_size = 12; /* can't use thunk_size due to alignment */
3575
3576 while (1) {
3577 uint32_t next = nl->next;
3578 if (next) {
3579 nl->next = nl_size + (strlen(nl->name) + 1);
3580 }
3581 if (remaining_data < nl->next) {
3582 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3583 break;
3584 }
3585 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3586 strcpy(cur_data + nl_size, nl->name);
3587 cur_data += nl->next;
3588 remaining_data -= nl->next;
3589 if (!next) {
3590 break;
3591 }
3592 nl = (void*)nl + next;
3593 }
3594 break;
3595 }
3596 case DM_DEV_WAIT:
3597 case DM_TABLE_STATUS:
3598 {
3599 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3600 void *cur_data = argptr;
3601 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3602 int spec_size = thunk_type_size(arg_type, 0);
3603 int i;
3604
3605 for (i = 0; i < host_dm->target_count; i++) {
3606 uint32_t next = spec->next;
3607 int slen = strlen((char*)&spec[1]) + 1;
3608 spec->next = (cur_data - argptr) + spec_size + slen;
3609 if (guest_data_size < spec->next) {
3610 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3611 break;
3612 }
3613 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3614 strcpy(cur_data + spec_size, (char*)&spec[1]);
3615 cur_data = argptr + spec->next;
3616 spec = (void*)host_dm + host_dm->data_start + next;
3617 }
3618 break;
3619 }
3620 case DM_TABLE_DEPS:
3621 {
3622 void *hdata = (void*)host_dm + host_dm->data_start;
3623 int count = *(uint32_t*)hdata;
3624 uint64_t *hdev = hdata + 8;
3625 uint64_t *gdev = argptr + 8;
3626 int i;
3627
3628 *(uint32_t*)argptr = tswap32(count);
3629 for (i = 0; i < count; i++) {
3630 *gdev = tswap64(*hdev);
3631 gdev++;
3632 hdev++;
3633 }
3634 break;
3635 }
3636 case DM_LIST_VERSIONS:
3637 {
3638 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3639 uint32_t remaining_data = guest_data_size;
3640 void *cur_data = argptr;
3641 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3642 int vers_size = thunk_type_size(arg_type, 0);
3643
3644 while (1) {
3645 uint32_t next = vers->next;
3646 if (next) {
3647 vers->next = vers_size + (strlen(vers->name) + 1);
3648 }
3649 if (remaining_data < vers->next) {
3650 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3651 break;
3652 }
3653 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3654 strcpy(cur_data + vers_size, vers->name);
3655 cur_data += vers->next;
3656 remaining_data -= vers->next;
3657 if (!next) {
3658 break;
3659 }
3660 vers = (void*)vers + next;
3661 }
3662 break;
3663 }
3664 default:
3665 ret = -TARGET_EINVAL;
3666 goto out;
3667 }
3668 unlock_user(argptr, guest_data, guest_data_size);
3669
3670 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3671 if (!argptr) {
3672 ret = -TARGET_EFAULT;
3673 goto out;
3674 }
3675 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3676 unlock_user(argptr, arg, target_size);
3677 }
3678 out:
3679 g_free(big_buf);
3680 return ret;
3681 }
3682
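/* The dm_ioctl payload handled above is self-describing: the fixed
 * struct dm_ioctl header is followed, at offset data_start, by the
 * command-specific data, and data_size covers header plus payload.
 * Hence the handler computes
 *
 *   guest_data      = arg + host_dm->data_start;
 *   guest_data_size = host_dm->data_size - host_dm->data_start;
 *
 * and converts the variable-length part separately from the header.
 */
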
3683 static IOCTLEntry ioctl_entries[] = {
3684 #define IOCTL(cmd, access, ...) \
3685 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3686 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3687 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3688 #include "ioctls.h"
3689 { 0, 0, },
3690 };
3691
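/* Every IOCTL()/IOCTL_SPECIAL() line in ioctls.h expands to one entry
 * of this table.  As an illustrative example (assuming ioctls.h carries
 * an entry of this shape):
 *
 *   IOCTL(TCGETS, IOC_R, MK_PTR(MK_STRUCT(STRUCT_termios)))
 *
 * expands to
 *
 *   { TARGET_TCGETS, TCGETS, "TCGETS", IOC_R, 0,
 *     { MK_PTR(MK_STRUCT(STRUCT_termios)) } },
 *
 * so do_ioctl() below can match the guest command against target_cmd,
 * check the access direction and convert the argument via the thunk
 * machinery.
 */
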
3692 /* ??? Implement proper locking for ioctls. */
3693 /* do_ioctl() Must return target values and target errnos. */
3694 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3695 {
3696 const IOCTLEntry *ie;
3697 const argtype *arg_type;
3698 abi_long ret;
3699 uint8_t buf_temp[MAX_STRUCT_SIZE];
3700 int target_size;
3701 void *argptr;
3702
3703 ie = ioctl_entries;
3704 for(;;) {
3705 if (ie->target_cmd == 0) {
3706 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3707 return -TARGET_ENOSYS;
3708 }
3709 if (ie->target_cmd == cmd)
3710 break;
3711 ie++;
3712 }
3713 arg_type = ie->arg_type;
3714 #if defined(DEBUG)
3715 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3716 #endif
3717 if (ie->do_ioctl) {
3718 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3719 }
3720
3721 switch(arg_type[0]) {
3722 case TYPE_NULL:
3723 /* no argument */
3724 ret = get_errno(ioctl(fd, ie->host_cmd));
3725 break;
3726 case TYPE_PTRVOID:
3727 case TYPE_INT:
3728 /* int argument */
3729 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3730 break;
3731 case TYPE_PTR:
3732 arg_type++;
3733 target_size = thunk_type_size(arg_type, 0);
3734 switch(ie->access) {
3735 case IOC_R:
3736 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3737 if (!is_error(ret)) {
3738 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3739 if (!argptr)
3740 return -TARGET_EFAULT;
3741 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3742 unlock_user(argptr, arg, target_size);
3743 }
3744 break;
3745 case IOC_W:
3746 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3747 if (!argptr)
3748 return -TARGET_EFAULT;
3749 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3750 unlock_user(argptr, arg, 0);
3751 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3752 break;
3753 default:
3754 case IOC_RW:
3755 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3756 if (!argptr)
3757 return -TARGET_EFAULT;
3758 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3759 unlock_user(argptr, arg, 0);
3760 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3761 if (!is_error(ret)) {
3762 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3763 if (!argptr)
3764 return -TARGET_EFAULT;
3765 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3766 unlock_user(argptr, arg, target_size);
3767 }
3768 break;
3769 }
3770 break;
3771 default:
3772 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3773 (long)cmd, arg_type[0]);
3774 ret = -TARGET_ENOSYS;
3775 break;
3776 }
3777 return ret;
3778 }
3779
3780 static const bitmask_transtbl iflag_tbl[] = {
3781 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3782 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3783 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3784 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3785 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3786 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3787 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3788 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3789 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3790 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3791 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3792 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3793 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3794 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3795 { 0, 0, 0, 0 }
3796 };
3797
3798 static const bitmask_transtbl oflag_tbl[] = {
3799 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3800 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3801 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3802 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3803 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3804 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3805 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3806 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3807 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3808 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3809 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3810 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3811 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3812 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3813 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3814 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3815 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3816 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3817 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3818 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3819 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3820 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3821 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3822 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3823 { 0, 0, 0, 0 }
3824 };
3825
3826 static const bitmask_transtbl cflag_tbl[] = {
3827 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3828 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3829 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3830 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3831 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3832 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3833 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3834 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3835 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3836 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3837 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3838 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3839 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3840 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3841 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3842 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3843 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3844 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3845 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3846 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3847 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3848 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3849 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3850 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3851 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3852 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3853 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3854 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3855 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3856 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3857 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3858 { 0, 0, 0, 0 }
3859 };
3860
3861 static const bitmask_transtbl lflag_tbl[] = {
3862 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3863 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3864 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3865 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3866 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3867 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3868 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3869 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3870 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3871 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3872 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3873 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3874 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3875 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3876 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3877 { 0, 0, 0, 0 }
3878 };
3879
3880 static void target_to_host_termios (void *dst, const void *src)
3881 {
3882 struct host_termios *host = dst;
3883 const struct target_termios *target = src;
3884
3885 host->c_iflag =
3886 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3887 host->c_oflag =
3888 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3889 host->c_cflag =
3890 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3891 host->c_lflag =
3892 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3893 host->c_line = target->c_line;
3894
3895 memset(host->c_cc, 0, sizeof(host->c_cc));
3896 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3897 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3898 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3899 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3900 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3901 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3902 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3903 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3904 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3905 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3906 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3907 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3908 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3909 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3910 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3911 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3912 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3913 }
3914
3915 static void host_to_target_termios (void *dst, const void *src)
3916 {
3917 struct target_termios *target = dst;
3918 const struct host_termios *host = src;
3919
3920 target->c_iflag =
3921 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3922 target->c_oflag =
3923 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3924 target->c_cflag =
3925 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3926 target->c_lflag =
3927 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3928 target->c_line = host->c_line;
3929
3930 memset(target->c_cc, 0, sizeof(target->c_cc));
3931 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3932 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3933 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3934 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3935 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3936 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3937 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3938 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3939 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3940 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3941 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3942 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3943 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3944 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3945 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3946 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3947 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3948 }
3949
3950 static const StructEntry struct_termios_def = {
3951 .convert = { host_to_target_termios, target_to_host_termios },
3952 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3953 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3954 };
3955
3956 static bitmask_transtbl mmap_flags_tbl[] = {
3957 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3958 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3959 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3960 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3961 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3962 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3963 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3964 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3965 { 0, 0, 0, 0 }
3966 };
3967
3968 #if defined(TARGET_I386)
3969
3970 /* NOTE: there is really only one LDT shared by all the threads */
3971 static uint8_t *ldt_table;
3972
3973 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3974 {
3975 int size;
3976 void *p;
3977
3978 if (!ldt_table)
3979 return 0;
3980 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3981 if (size > bytecount)
3982 size = bytecount;
3983 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3984 if (!p)
3985 return -TARGET_EFAULT;
3986 /* ??? Should this be byteswapped? */
3987 memcpy(p, ldt_table, size);
3988 unlock_user(p, ptr, size);
3989 return size;
3990 }
3991
3992 /* XXX: add locking support */
3993 static abi_long write_ldt(CPUX86State *env,
3994 abi_ulong ptr, unsigned long bytecount, int oldmode)
3995 {
3996 struct target_modify_ldt_ldt_s ldt_info;
3997 struct target_modify_ldt_ldt_s *target_ldt_info;
3998 int seg_32bit, contents, read_exec_only, limit_in_pages;
3999 int seg_not_present, useable, lm;
4000 uint32_t *lp, entry_1, entry_2;
4001
4002 if (bytecount != sizeof(ldt_info))
4003 return -TARGET_EINVAL;
4004 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4005 return -TARGET_EFAULT;
4006 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4007 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4008 ldt_info.limit = tswap32(target_ldt_info->limit);
4009 ldt_info.flags = tswap32(target_ldt_info->flags);
4010 unlock_user_struct(target_ldt_info, ptr, 0);
4011
4012 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4013 return -TARGET_EINVAL;
4014 seg_32bit = ldt_info.flags & 1;
4015 contents = (ldt_info.flags >> 1) & 3;
4016 read_exec_only = (ldt_info.flags >> 3) & 1;
4017 limit_in_pages = (ldt_info.flags >> 4) & 1;
4018 seg_not_present = (ldt_info.flags >> 5) & 1;
4019 useable = (ldt_info.flags >> 6) & 1;
4020 #ifdef TARGET_ABI32
4021 lm = 0;
4022 #else
4023 lm = (ldt_info.flags >> 7) & 1;
4024 #endif
4025 if (contents == 3) {
4026 if (oldmode)
4027 return -TARGET_EINVAL;
4028 if (seg_not_present == 0)
4029 return -TARGET_EINVAL;
4030 }
4031 /* allocate the LDT */
4032 if (!ldt_table) {
4033 env->ldt.base = target_mmap(0,
4034 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4035 PROT_READ|PROT_WRITE,
4036 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4037 if (env->ldt.base == -1)
4038 return -TARGET_ENOMEM;
4039 memset(g2h(env->ldt.base), 0,
4040 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4041 env->ldt.limit = 0xffff;
4042 ldt_table = g2h(env->ldt.base);
4043 }
4044
4045 /* NOTE: same code as Linux kernel */
4046 /* Allow LDTs to be cleared by the user. */
4047 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4048 if (oldmode ||
4049 (contents == 0 &&
4050 read_exec_only == 1 &&
4051 seg_32bit == 0 &&
4052 limit_in_pages == 0 &&
4053 seg_not_present == 1 &&
4054 useable == 0 )) {
4055 entry_1 = 0;
4056 entry_2 = 0;
4057 goto install;
4058 }
4059 }
4060
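/* entry_1/entry_2 follow the standard x86 descriptor layout: entry_1
   packs base[15:0] with limit[15:0]; entry_2 packs base[31:24], the
   flag bits, limit[19:16] and base[23:16].  The 0x7000 constant sets
   the S bit and DPL=3, i.e. a user-level code/data segment. */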
4061 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4062 (ldt_info.limit & 0x0ffff);
4063 entry_2 = (ldt_info.base_addr & 0xff000000) |
4064 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4065 (ldt_info.limit & 0xf0000) |
4066 ((read_exec_only ^ 1) << 9) |
4067 (contents << 10) |
4068 ((seg_not_present ^ 1) << 15) |
4069 (seg_32bit << 22) |
4070 (limit_in_pages << 23) |
4071 (lm << 21) |
4072 0x7000;
4073 if (!oldmode)
4074 entry_2 |= (useable << 20);
4075
4076 /* Install the new entry ... */
4077 install:
4078 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4079 lp[0] = tswap32(entry_1);
4080 lp[1] = tswap32(entry_2);
4081 return 0;
4082 }
4083
4084 /* specific and weird i386 syscalls */
4085 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4086 unsigned long bytecount)
4087 {
4088 abi_long ret;
4089
4090 switch (func) {
4091 case 0:
4092 ret = read_ldt(ptr, bytecount);
4093 break;
4094 case 1:
4095 ret = write_ldt(env, ptr, bytecount, 1);
4096 break;
4097 case 0x11:
4098 ret = write_ldt(env, ptr, bytecount, 0);
4099 break;
4100 default:
4101 ret = -TARGET_ENOSYS;
4102 break;
4103 }
4104 return ret;
4105 }
4106
4107 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4108 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4109 {
4110 uint64_t *gdt_table = g2h(env->gdt.base);
4111 struct target_modify_ldt_ldt_s ldt_info;
4112 struct target_modify_ldt_ldt_s *target_ldt_info;
4113 int seg_32bit, contents, read_exec_only, limit_in_pages;
4114 int seg_not_present, useable, lm;
4115 uint32_t *lp, entry_1, entry_2;
4116 int i;
4117
4118 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4119 if (!target_ldt_info)
4120 return -TARGET_EFAULT;
4121 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4122 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4123 ldt_info.limit = tswap32(target_ldt_info->limit);
4124 ldt_info.flags = tswap32(target_ldt_info->flags);
4125 if (ldt_info.entry_number == -1) {
4126 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4127 if (gdt_table[i] == 0) {
4128 ldt_info.entry_number = i;
4129 target_ldt_info->entry_number = tswap32(i);
4130 break;
4131 }
4132 }
4133 }
4134 unlock_user_struct(target_ldt_info, ptr, 1);
4135
4136 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4137 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4138 return -TARGET_EINVAL;
4139 seg_32bit = ldt_info.flags & 1;
4140 contents = (ldt_info.flags >> 1) & 3;
4141 read_exec_only = (ldt_info.flags >> 3) & 1;
4142 limit_in_pages = (ldt_info.flags >> 4) & 1;
4143 seg_not_present = (ldt_info.flags >> 5) & 1;
4144 useable = (ldt_info.flags >> 6) & 1;
4145 #ifdef TARGET_ABI32
4146 lm = 0;
4147 #else
4148 lm = (ldt_info.flags >> 7) & 1;
4149 #endif
4150
4151 if (contents == 3) {
4152 if (seg_not_present == 0)
4153 return -TARGET_EINVAL;
4154 }
4155
4156 /* NOTE: same code as Linux kernel */
4157 /* Allow LDTs to be cleared by the user. */
4158 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4159 if ((contents == 0 &&
4160 read_exec_only == 1 &&
4161 seg_32bit == 0 &&
4162 limit_in_pages == 0 &&
4163 seg_not_present == 1 &&
4164 useable == 0 )) {
4165 entry_1 = 0;
4166 entry_2 = 0;
4167 goto install;
4168 }
4169 }
4170
4171 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4172 (ldt_info.limit & 0x0ffff);
4173 entry_2 = (ldt_info.base_addr & 0xff000000) |
4174 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4175 (ldt_info.limit & 0xf0000) |
4176 ((read_exec_only ^ 1) << 9) |
4177 (contents << 10) |
4178 ((seg_not_present ^ 1) << 15) |
4179 (seg_32bit << 22) |
4180 (limit_in_pages << 23) |
4181 (useable << 20) |
4182 (lm << 21) |
4183 0x7000;
4184
4185 /* Install the new entry ... */
4186 install:
4187 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4188 lp[0] = tswap32(entry_1);
4189 lp[1] = tswap32(entry_2);
4190 return 0;
4191 }
4192
4193 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4194 {
4195 struct target_modify_ldt_ldt_s *target_ldt_info;
4196 uint64_t *gdt_table = g2h(env->gdt.base);
4197 uint32_t base_addr, limit, flags;
4198 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4199 int seg_not_present, useable, lm;
4200 uint32_t *lp, entry_1, entry_2;
4201
4202 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4203 if (!target_ldt_info)
4204 return -TARGET_EFAULT;
4205 idx = tswap32(target_ldt_info->entry_number);
4206 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4207 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4208 unlock_user_struct(target_ldt_info, ptr, 1);
4209 return -TARGET_EINVAL;
4210 }
4211 lp = (uint32_t *)(gdt_table + idx);
4212 entry_1 = tswap32(lp[0]);
4213 entry_2 = tswap32(lp[1]);
4214
4215 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4216 contents = (entry_2 >> 10) & 3;
4217 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4218 seg_32bit = (entry_2 >> 22) & 1;
4219 limit_in_pages = (entry_2 >> 23) & 1;
4220 useable = (entry_2 >> 20) & 1;
4221 #ifdef TARGET_ABI32
4222 lm = 0;
4223 #else
4224 lm = (entry_2 >> 21) & 1;
4225 #endif
4226 flags = (seg_32bit << 0) | (contents << 1) |
4227 (read_exec_only << 3) | (limit_in_pages << 4) |
4228 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4229 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4230 base_addr = (entry_1 >> 16) |
4231 (entry_2 & 0xff000000) |
4232 ((entry_2 & 0xff) << 16);
4233 target_ldt_info->base_addr = tswapal(base_addr);
4234 target_ldt_info->limit = tswap32(limit);
4235 target_ldt_info->flags = tswap32(flags);
4236 unlock_user_struct(target_ldt_info, ptr, 1);
4237 return 0;
4238 }
4239 #endif /* TARGET_I386 && TARGET_ABI32 */
4240
4241 #ifndef TARGET_ABI32
4242 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4243 {
4244 abi_long ret = 0;
4245 abi_ulong val;
4246 int idx;
4247
4248 switch(code) {
4249 case TARGET_ARCH_SET_GS:
4250 case TARGET_ARCH_SET_FS:
4251 if (code == TARGET_ARCH_SET_GS)
4252 idx = R_GS;
4253 else
4254 idx = R_FS;
4255 cpu_x86_load_seg(env, idx, 0);
4256 env->segs[idx].base = addr;
4257 break;
4258 case TARGET_ARCH_GET_GS:
4259 case TARGET_ARCH_GET_FS:
4260 if (code == TARGET_ARCH_GET_GS)
4261 idx = R_GS;
4262 else
4263 idx = R_FS;
4264 val = env->segs[idx].base;
4265 if (put_user(val, addr, abi_ulong))
4266 ret = -TARGET_EFAULT;
4267 break;
4268 default:
4269 ret = -TARGET_EINVAL;
4270 break;
4271 }
4272 return ret;
4273 }
4274 #endif
4275
4276 #endif /* defined(TARGET_I386) */
4277
4278 #define NEW_STACK_SIZE 0x40000
4279
4280 #if defined(CONFIG_USE_NPTL)
4281
4282 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
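/* Per-clone bookkeeping handed from do_fork() to clone_func(): the
   mutex/cond pair lets the parent block until the child has published
   its TID and finished its early setup. */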
4283 typedef struct {
4284 CPUArchState *env;
4285 pthread_mutex_t mutex;
4286 pthread_cond_t cond;
4287 pthread_t thread;
4288 uint32_t tid;
4289 abi_ulong child_tidptr;
4290 abi_ulong parent_tidptr;
4291 sigset_t sigmask;
4292 } new_thread_info;
4293
4294 static void *clone_func(void *arg)
4295 {
4296 new_thread_info *info = arg;
4297 CPUArchState *env;
4298 TaskState *ts;
4299
4300 env = info->env;
4301 thread_env = env;
4302 ts = (TaskState *)thread_env->opaque;
4303 info->tid = gettid();
4304 env->host_tid = info->tid;
4305 task_settid(ts);
4306 if (info->child_tidptr)
4307 put_user_u32(info->tid, info->child_tidptr);
4308 if (info->parent_tidptr)
4309 put_user_u32(info->tid, info->parent_tidptr);
4310 /* Enable signals. */
4311 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4312 /* Signal to the parent that we're ready. */
4313 pthread_mutex_lock(&info->mutex);
4314 pthread_cond_broadcast(&info->cond);
4315 pthread_mutex_unlock(&info->mutex);
4316 /* Wait until the parent has finished initializing the TLS state. */
4317 pthread_mutex_lock(&clone_lock);
4318 pthread_mutex_unlock(&clone_lock);
4319 cpu_loop(env);
4320 /* never exits */
4321 return NULL;
4322 }
4323 #else
4324
4325 static int clone_func(void *arg)
4326 {
4327 CPUArchState *env = arg;
4328 cpu_loop(env);
4329 /* never exits */
4330 return 0;
4331 }
4332 #endif
4333
4334 /* do_fork() must return host values and target errnos (unlike most
4335 do_*() functions). */
4336 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4337 abi_ulong parent_tidptr, target_ulong newtls,
4338 abi_ulong child_tidptr)
4339 {
4340 int ret;
4341 TaskState *ts;
4342 CPUArchState *new_env;
4343 #if defined(CONFIG_USE_NPTL)
4344 unsigned int nptl_flags;
4345 sigset_t sigmask;
4346 #else
4347 uint8_t *new_stack;
4348 #endif
4349
4350 /* Emulate vfork() with fork() */
4351 if (flags & CLONE_VFORK)
4352 flags &= ~(CLONE_VFORK | CLONE_VM);
4353
4354 if (flags & CLONE_VM) {
4355 TaskState *parent_ts = (TaskState *)env->opaque;
4356 #if defined(CONFIG_USE_NPTL)
4357 new_thread_info info;
4358 pthread_attr_t attr;
4359 #endif
4360 ts = g_malloc0(sizeof(TaskState));
4361 init_task_state(ts);
4362 /* we create a new CPU instance. */
4363 new_env = cpu_copy(env);
4364 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4365 cpu_reset(ENV_GET_CPU(new_env));
4366 #endif
4367 /* Init regs that differ from the parent. */
4368 cpu_clone_regs(new_env, newsp);
4369 new_env->opaque = ts;
4370 ts->bprm = parent_ts->bprm;
4371 ts->info = parent_ts->info;
4372 #if defined(CONFIG_USE_NPTL)
4373 nptl_flags = flags;
4374 flags &= ~CLONE_NPTL_FLAGS2;
4375
4376 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4377 ts->child_tidptr = child_tidptr;
4378 }
4379
4380 if (nptl_flags & CLONE_SETTLS)
4381 cpu_set_tls (new_env, newtls);
4382
4383 /* Grab a mutex so that thread setup appears atomic. */
4384 pthread_mutex_lock(&clone_lock);
4385
4386 memset(&info, 0, sizeof(info));
4387 pthread_mutex_init(&info.mutex, NULL);
4388 pthread_mutex_lock(&info.mutex);
4389 pthread_cond_init(&info.cond, NULL);
4390 info.env = new_env;
4391 if (nptl_flags & CLONE_CHILD_SETTID)
4392 info.child_tidptr = child_tidptr;
4393 if (nptl_flags & CLONE_PARENT_SETTID)
4394 info.parent_tidptr = parent_tidptr;
4395
4396 ret = pthread_attr_init(&attr);
4397 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4398 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4399 /* It is not safe to deliver signals until the child has finished
4400 initializing, so temporarily block all signals. */
4401 sigfillset(&sigmask);
4402 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4403
4404 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4405 /* TODO: Free new CPU state if thread creation failed. */
4406
4407 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4408 pthread_attr_destroy(&attr);
4409 if (ret == 0) {
4410 /* Wait for the child to initialize. */
4411 pthread_cond_wait(&info.cond, &info.mutex);
4412 ret = info.tid;
4413 if (flags & CLONE_PARENT_SETTID)
4414 put_user_u32(ret, parent_tidptr);
4415 } else {
4416 ret = -1;
4417 }
4418 pthread_mutex_unlock(&info.mutex);
4419 pthread_cond_destroy(&info.cond);
4420 pthread_mutex_destroy(&info.mutex);
4421 pthread_mutex_unlock(&clone_lock);
4422 #else
4423 if (flags & CLONE_NPTL_FLAGS2)
4424 return -EINVAL;
4425 /* This is probably going to die very quickly, but do it anyway. */
4426 new_stack = g_malloc0 (NEW_STACK_SIZE);
4427 #ifdef __ia64__
4428 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4429 #else
4430 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4431 #endif
4432 #endif
4433 } else {
4434 /* if no CLONE_VM, we consider it a fork */
4435 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4436 return -EINVAL;
4437 fork_start();
4438 ret = fork();
4439 if (ret == 0) {
4440 /* Child Process. */
4441 cpu_clone_regs(env, newsp);
4442 fork_end(1);
4443 #if defined(CONFIG_USE_NPTL)
4444 /* There is a race condition here. The parent process could
4445 theoretically read the TID in the child process before the child
4446 tid is set. This would require using either ptrace
4447 (not implemented) or making *_tidptr point at a shared memory
4448 mapping. We can't repeat the spinlock hack used above because
4449 the child process gets its own copy of the lock. */
4450 if (flags & CLONE_CHILD_SETTID)
4451 put_user_u32(gettid(), child_tidptr);
4452 if (flags & CLONE_PARENT_SETTID)
4453 put_user_u32(gettid(), parent_tidptr);
4454 ts = (TaskState *)env->opaque;
4455 if (flags & CLONE_SETTLS)
4456 cpu_set_tls (env, newtls);
4457 if (flags & CLONE_CHILD_CLEARTID)
4458 ts->child_tidptr = child_tidptr;
4459 #endif
4460 } else {
4461 fork_end(0);
4462 }
4463 }
4464 return ret;
4465 }
4466
4467 /* Warning: doesn't handle Linux-specific flags... */
4468 static int target_to_host_fcntl_cmd(int cmd)
4469 {
4470 switch(cmd) {
4471 case TARGET_F_DUPFD:
4472 case TARGET_F_GETFD:
4473 case TARGET_F_SETFD:
4474 case TARGET_F_GETFL:
4475 case TARGET_F_SETFL:
4476 return cmd;
4477 case TARGET_F_GETLK:
4478 return F_GETLK;
4479 case TARGET_F_SETLK:
4480 return F_SETLK;
4481 case TARGET_F_SETLKW:
4482 return F_SETLKW;
4483 case TARGET_F_GETOWN:
4484 return F_GETOWN;
4485 case TARGET_F_SETOWN:
4486 return F_SETOWN;
4487 case TARGET_F_GETSIG:
4488 return F_GETSIG;
4489 case TARGET_F_SETSIG:
4490 return F_SETSIG;
4491 #if TARGET_ABI_BITS == 32
4492 case TARGET_F_GETLK64:
4493 return F_GETLK64;
4494 case TARGET_F_SETLK64:
4495 return F_SETLK64;
4496 case TARGET_F_SETLKW64:
4497 return F_SETLKW64;
4498 #endif
4499 case TARGET_F_SETLEASE:
4500 return F_SETLEASE;
4501 case TARGET_F_GETLEASE:
4502 return F_GETLEASE;
4503 #ifdef F_DUPFD_CLOEXEC
4504 case TARGET_F_DUPFD_CLOEXEC:
4505 return F_DUPFD_CLOEXEC;
4506 #endif
4507 case TARGET_F_NOTIFY:
4508 return F_NOTIFY;
4509 default:
4510 return -TARGET_EINVAL;
4511 }
4512 return -TARGET_EINVAL;
4513 }
4514
4515 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
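/* Each entry below uses a mask of -1, so the translation is an exact
   match on the whole l_type value (the lock types are small constants,
   not OR-able flag bits). */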
4516 static const bitmask_transtbl flock_tbl[] = {
4517 TRANSTBL_CONVERT(F_RDLCK),
4518 TRANSTBL_CONVERT(F_WRLCK),
4519 TRANSTBL_CONVERT(F_UNLCK),
4520 TRANSTBL_CONVERT(F_EXLCK),
4521 TRANSTBL_CONVERT(F_SHLCK),
4522 { 0, 0, 0, 0 }
4523 };
4524
4525 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4526 {
4527 struct flock fl;
4528 struct target_flock *target_fl;
4529 struct flock64 fl64;
4530 struct target_flock64 *target_fl64;
4531 abi_long ret;
4532 int host_cmd = target_to_host_fcntl_cmd(cmd);
4533
4534 if (host_cmd == -TARGET_EINVAL)
4535 return host_cmd;
4536
4537 switch(cmd) {
4538 case TARGET_F_GETLK:
4539 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4540 return -TARGET_EFAULT;
4541 fl.l_type =
4542 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4543 fl.l_whence = tswap16(target_fl->l_whence);
4544 fl.l_start = tswapal(target_fl->l_start);
4545 fl.l_len = tswapal(target_fl->l_len);
4546 fl.l_pid = tswap32(target_fl->l_pid);
4547 unlock_user_struct(target_fl, arg, 0);
4548 ret = get_errno(fcntl(fd, host_cmd, &fl));
4549 if (ret == 0) {
4550 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4551 return -TARGET_EFAULT;
4552 target_fl->l_type =
4553 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4554 target_fl->l_whence = tswap16(fl.l_whence);
4555 target_fl->l_start = tswapal(fl.l_start);
4556 target_fl->l_len = tswapal(fl.l_len);
4557 target_fl->l_pid = tswap32(fl.l_pid);
4558 unlock_user_struct(target_fl, arg, 1);
4559 }
4560 break;
4561
4562 case TARGET_F_SETLK:
4563 case TARGET_F_SETLKW:
4564 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4565 return -TARGET_EFAULT;
4566 fl.l_type =
4567 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4568 fl.l_whence = tswap16(target_fl->l_whence);
4569 fl.l_start = tswapal(target_fl->l_start);
4570 fl.l_len = tswapal(target_fl->l_len);
4571 fl.l_pid = tswap32(target_fl->l_pid);
4572 unlock_user_struct(target_fl, arg, 0);
4573 ret = get_errno(fcntl(fd, host_cmd, &fl));
4574 break;
4575
4576 case TARGET_F_GETLK64:
4577 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4578 return -TARGET_EFAULT;
4579 fl64.l_type =
4580 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4581 fl64.l_whence = tswap16(target_fl64->l_whence);
4582 fl64.l_start = tswap64(target_fl64->l_start);
4583 fl64.l_len = tswap64(target_fl64->l_len);
4584 fl64.l_pid = tswap32(target_fl64->l_pid);
4585 unlock_user_struct(target_fl64, arg, 0);
4586 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4587 if (ret == 0) {
4588 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4589 return -TARGET_EFAULT;
4590 target_fl64->l_type =
4591 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4592 target_fl64->l_whence = tswap16(fl64.l_whence);
4593 target_fl64->l_start = tswap64(fl64.l_start);
4594 target_fl64->l_len = tswap64(fl64.l_len);
4595 target_fl64->l_pid = tswap32(fl64.l_pid);
4596 unlock_user_struct(target_fl64, arg, 1);
4597 }
4598 break;
4599 case TARGET_F_SETLK64:
4600 case TARGET_F_SETLKW64:
4601 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4602 return -TARGET_EFAULT;
4603 fl64.l_type =
4604 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4605 fl64.l_whence = tswap16(target_fl64->l_whence);
4606 fl64.l_start = tswap64(target_fl64->l_start);
4607 fl64.l_len = tswap64(target_fl64->l_len);
4608 fl64.l_pid = tswap32(target_fl64->l_pid);
4609 unlock_user_struct(target_fl64, arg, 0);
4610 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4611 break;
4612
4613 case TARGET_F_GETFL:
4614 ret = get_errno(fcntl(fd, host_cmd, arg));
4615 if (ret >= 0) {
4616 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4617 }
4618 break;
4619
4620 case TARGET_F_SETFL:
4621 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4622 break;
4623
4624 case TARGET_F_SETOWN:
4625 case TARGET_F_GETOWN:
4626 case TARGET_F_SETSIG:
4627 case TARGET_F_GETSIG:
4628 case TARGET_F_SETLEASE:
4629 case TARGET_F_GETLEASE:
4630 ret = get_errno(fcntl(fd, host_cmd, arg));
4631 break;
4632
4633 default:
4634 ret = get_errno(fcntl(fd, cmd, arg));
4635 break;
4636 }
4637 return ret;
4638 }
4639
4640 #ifdef USE_UID16
4641
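/* The 16-bit uid/gid syscall variants can only carry IDs up to 65535.
   Larger host IDs are reported to the guest as the overflow ID 65534,
   and a 16-bit -1 from the guest (the "leave unchanged" value used by
   setres[ug]id and friends) must be widened back to -1. */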
4642 static inline int high2lowuid(int uid)
4643 {
4644 if (uid > 65535)
4645 return 65534;
4646 else
4647 return uid;
4648 }
4649
4650 static inline int high2lowgid(int gid)
4651 {
4652 if (gid > 65535)
4653 return 65534;
4654 else
4655 return gid;
4656 }
4657
4658 static inline int low2highuid(int uid)
4659 {
4660 if ((int16_t)uid == -1)
4661 return -1;
4662 else
4663 return uid;
4664 }
4665
4666 static inline int low2highgid(int gid)
4667 {
4668 if ((int16_t)gid == -1)
4669 return -1;
4670 else
4671 return gid;
4672 }
4673 static inline int tswapid(int id)
4674 {
4675 return tswap16(id);
4676 }
4677 #else /* !USE_UID16 */
4678 static inline int high2lowuid(int uid)
4679 {
4680 return uid;
4681 }
4682 static inline int high2lowgid(int gid)
4683 {
4684 return gid;
4685 }
4686 static inline int low2highuid(int uid)
4687 {
4688 return uid;
4689 }
4690 static inline int low2highgid(int gid)
4691 {
4692 return gid;
4693 }
4694 static inline int tswapid(int id)
4695 {
4696 return tswap32(id);
4697 }
4698 #endif /* USE_UID16 */
4699
4700 void syscall_init(void)
4701 {
4702 IOCTLEntry *ie;
4703 const argtype *arg_type;
4704 int size;
4705 int i;
4706
4707 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4708 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4709 #include "syscall_types.h"
4710 #undef STRUCT
4711 #undef STRUCT_SPECIAL
4712
4713 /* Build the target_to_host_errno_table[] from
4714 * host_to_target_errno_table[]. */
4715 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4716 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4717 }
4718
4719 /* We patch the ioctl size if necessary. We rely on the fact that
4720 no ioctl has all the bits set to '1' in the size field. */
4721 ie = ioctl_entries;
4722 while (ie->target_cmd != 0) {
4723 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4724 TARGET_IOC_SIZEMASK) {
4725 arg_type = ie->arg_type;
4726 if (arg_type[0] != TYPE_PTR) {
4727 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4728 ie->target_cmd);
4729 exit(1);
4730 }
4731 arg_type++;
4732 size = thunk_type_size(arg_type, 0);
4733 ie->target_cmd = (ie->target_cmd &
4734 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4735 (size << TARGET_IOC_SIZESHIFT);
4736 }
4737
4738 /* automatic consistency check if same arch */
4739 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4740 (defined(__x86_64__) && defined(TARGET_X86_64))
4741 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4742 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4743 ie->name, ie->target_cmd, ie->host_cmd);
4744 }
4745 #endif
4746 ie++;
4747 }
4748 }
4749
4750 #if TARGET_ABI_BITS == 32
4751 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4752 {
4753 #ifdef TARGET_WORDS_BIGENDIAN
4754 return ((uint64_t)word0 << 32) | word1;
4755 #else
4756 return ((uint64_t)word1 << 32) | word0;
4757 #endif
4758 }
4759 #else /* TARGET_ABI_BITS == 32 */
4760 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4761 {
4762 return word0;
4763 }
4764 #endif /* TARGET_ABI_BITS != 32 */
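/* Example: a 32-bit little-endian guest passing the 64-bit offset
   0x100000000 in a register pair supplies word0 = 0 (low half) and
   word1 = 1 (high half); a big-endian guest swaps the two halves. */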
4765
4766 #ifdef TARGET_NR_truncate64
4767 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4768 abi_long arg2,
4769 abi_long arg3,
4770 abi_long arg4)
4771 {
4772 if (regpairs_aligned(cpu_env)) {
4773 arg2 = arg3;
4774 arg3 = arg4;
4775 }
4776 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4777 }
4778 #endif
4779
4780 #ifdef TARGET_NR_ftruncate64
4781 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4782 abi_long arg2,
4783 abi_long arg3,
4784 abi_long arg4)
4785 {
4786 if (regpairs_aligned(cpu_env)) {
4787 arg2 = arg3;
4788 arg3 = arg4;
4789 }
4790 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4791 }
4792 #endif
4793
4794 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4795 abi_ulong target_addr)
4796 {
4797 struct target_timespec *target_ts;
4798
4799 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4800 return -TARGET_EFAULT;
4801 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4802 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4803 unlock_user_struct(target_ts, target_addr, 0);
4804 return 0;
4805 }
4806
4807 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4808 struct timespec *host_ts)
4809 {
4810 struct target_timespec *target_ts;
4811
4812 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4813 return -TARGET_EFAULT;
4814 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4815 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4816 unlock_user_struct(target_ts, target_addr, 1);
4817 return 0;
4818 }
4819
4820 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4821 static inline abi_long host_to_target_stat64(void *cpu_env,
4822 abi_ulong target_addr,
4823 struct stat *host_st)
4824 {
4825 #ifdef TARGET_ARM
4826 if (((CPUARMState *)cpu_env)->eabi) {
4827 struct target_eabi_stat64 *target_st;
4828
4829 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4830 return -TARGET_EFAULT;
4831 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4832 __put_user(host_st->st_dev, &target_st->st_dev);
4833 __put_user(host_st->st_ino, &target_st->st_ino);
4834 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4835 __put_user(host_st->st_ino, &target_st->__st_ino);
4836 #endif
4837 __put_user(host_st->st_mode, &target_st->st_mode);
4838 __put_user(host_st->st_nlink, &target_st->st_nlink);
4839 __put_user(host_st->st_uid, &target_st->st_uid);
4840 __put_user(host_st->st_gid, &target_st->st_gid);
4841 __put_user(host_st->st_rdev, &target_st->st_rdev);
4842 __put_user(host_st->st_size, &target_st->st_size);
4843 __put_user(host_st->st_blksize, &target_st->st_blksize);
4844 __put_user(host_st->st_blocks, &target_st->st_blocks);
4845 __put_user(host_st->st_atime, &target_st->target_st_atime);
4846 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4847 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4848 unlock_user_struct(target_st, target_addr, 1);
4849 } else
4850 #endif
4851 {
4852 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4853 struct target_stat *target_st;
4854 #else
4855 struct target_stat64 *target_st;
4856 #endif
4857
4858 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4859 return -TARGET_EFAULT;
4860 memset(target_st, 0, sizeof(*target_st));
4861 __put_user(host_st->st_dev, &target_st->st_dev);
4862 __put_user(host_st->st_ino, &target_st->st_ino);
4863 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4864 __put_user(host_st->st_ino, &target_st->__st_ino);
4865 #endif
4866 __put_user(host_st->st_mode, &target_st->st_mode);
4867 __put_user(host_st->st_nlink, &target_st->st_nlink);
4868 __put_user(host_st->st_uid, &target_st->st_uid);
4869 __put_user(host_st->st_gid, &target_st->st_gid);
4870 __put_user(host_st->st_rdev, &target_st->st_rdev);
4871 /* XXX: better use of kernel struct */
4872 __put_user(host_st->st_size, &target_st->st_size);
4873 __put_user(host_st->st_blksize, &target_st->st_blksize);
4874 __put_user(host_st->st_blocks, &target_st->st_blocks);
4875 __put_user(host_st->st_atime, &target_st->target_st_atime);
4876 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4877 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4878 unlock_user_struct(target_st, target_addr, 1);
4879 }
4880
4881 return 0;
4882 }
4883 #endif
4884
4885 #if defined(CONFIG_USE_NPTL)
4886 /* ??? Using host futex calls even when target atomic operations
4887 are not really atomic probably breaks things. However, implementing
4888 futexes locally would make futexes shared between multiple processes
4889 tricky. In that case they're probably useless anyway, since guest
4890 atomic operations won't work either. */
4891 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4892 target_ulong uaddr2, int val3)
4893 {
4894 struct timespec ts, *pts;
4895 int base_op;
4896
4897 /* ??? We assume FUTEX_* constants are the same on both host
4898 and target. */
4899 #ifdef FUTEX_CMD_MASK
4900 base_op = op & FUTEX_CMD_MASK;
4901 #else
4902 base_op = op;
4903 #endif
4904 switch (base_op) {
4905 case FUTEX_WAIT:
4906 if (timeout) {
4907 pts = &ts;
4908 target_to_host_timespec(pts, timeout);
4909 } else {
4910 pts = NULL;
4911 }
4912 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4913 pts, NULL, 0));
4914 case FUTEX_WAKE:
4915 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4916 case FUTEX_FD:
4917 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4918 case FUTEX_REQUEUE:
4919 case FUTEX_CMP_REQUEUE:
4920 case FUTEX_WAKE_OP:
4921 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4922 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4923 But the prototype takes a `struct timespec *'; insert casts
4924 to satisfy the compiler. We do not need to tswap TIMEOUT
4925 since it's not compared to guest memory. */
4926 pts = (struct timespec *)(uintptr_t) timeout;
4927 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4928 g2h(uaddr2),
4929 (base_op == FUTEX_CMP_REQUEUE
4930 ? tswap32(val3)
4931 : val3)));
4932 default:
4933 return -TARGET_ENOSYS;
4934 }
4935 }
4936 #endif
4937
4938 /* Map host to target signal numbers for the wait family of syscalls.
4939 Assume all other status bits are the same. */
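/* Linux packs the status as: bits 0-6 = terminating signal, bit 7 =
   core-dump flag, bits 8-15 = exit code or stop signal (with 0x7f in
   the low byte marking a stopped child); only the signal numbers need
   translating. */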
4940 int host_to_target_waitstatus(int status)
4941 {
4942 if (WIFSIGNALED(status)) {
4943 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4944 }
4945 if (WIFSTOPPED(status)) {
4946 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4947 | (status & 0xff);
4948 }
4949 return status;
4950 }
4951
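/* Parse the kernel release into one byte per component, e.g. "2.6.32"
   becomes 0x020620 (the KERNEL_VERSION encoding). */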
4952 int get_osversion(void)
4953 {
4954 static int osversion;
4955 struct new_utsname buf;
4956 const char *s;
4957 int i, n, tmp;
4958 if (osversion)
4959 return osversion;
4960 if (qemu_uname_release && *qemu_uname_release) {
4961 s = qemu_uname_release;
4962 } else {
4963 if (sys_uname(&buf))
4964 return 0;
4965 s = buf.release;
4966 }
4967 tmp = 0;
4968 for (i = 0; i < 3; i++) {
4969 n = 0;
4970 while (*s >= '0' && *s <= '9') {
4971 n *= 10;
4972 n += *s - '0';
4973 s++;
4974 }
4975 tmp = (tmp << 8) + n;
4976 if (*s == '.')
4977 s++;
4978 }
4979 osversion = tmp;
4980 return osversion;
4981 }
4982
4983
4984 static int open_self_maps(void *cpu_env, int fd)
4985 {
4986 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4987 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4988 #endif
4989 FILE *fp;
4990 char *line = NULL;
4991 size_t len = 0;
4992 ssize_t read;
4993
4994 fp = fopen("/proc/self/maps", "r");
4995 if (fp == NULL) {
4996 return -EACCES;
4997 }
4998
4999 while ((read = getline(&line, &len, fp)) != -1) {
5000 int fields, dev_maj, dev_min, inode;
5001 uint64_t min, max, offset;
5002 char flag_r, flag_w, flag_x, flag_p;
5003 char path[512] = "";
5004 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5005 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5006 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5007
5008 if ((fields < 10) || (fields > 11)) {
5009 continue;
5010 }
5011 if (!strncmp(path, "[stack]", 7)) {
5012 continue;
5013 }
5014 if (h2g_valid(min) && h2g_valid(max)) {
5015 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5016 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
5017 h2g(min), h2g(max), flag_r, flag_w,
5018 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5019 path[0] ? " " : "", path);
5020 }
5021 }
5022
5023 free(line);
5024 fclose(fp);
5025
5026 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5027 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5028 (unsigned long long)ts->info->stack_limit,
5029 (unsigned long long)(ts->info->start_stack +
5030 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
5031 (unsigned long long)0);
5032 #endif
5033
5034 return 0;
5035 }
5036
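/* Emit a minimal /proc/self/stat: of the 44 space-separated fields
   written, only field 1 (pid), field 2 (comm) and field 28 (start of
   stack, per proc(5)) are filled in; everything else is written as 0. */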
5037 static int open_self_stat(void *cpu_env, int fd)
5038 {
5039 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5040 abi_ulong start_stack = ts->info->start_stack;
5041 int i;
5042
5043 for (i = 0; i < 44; i++) {
5044 char buf[128];
5045 int len;
5046 uint64_t val = 0;
5047
5048 if (i == 0) {
5049 /* pid */
5050 val = getpid();
5051 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5052 } else if (i == 1) {
5053 /* app name */
5054 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5055 } else if (i == 27) {
5056 /* stack bottom */
5057 val = start_stack;
5058 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5059 } else {
5060 /* all remaining fields are reported as 0 */
5061 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5062 }
5063
5064 len = strlen(buf);
5065 if (write(fd, buf, len) != len) {
5066 return -1;
5067 }
5068 }
5069
5070 return 0;
5071 }
5072
5073 static int open_self_auxv(void *cpu_env, int fd)
5074 {
5075 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5076 abi_ulong auxv = ts->info->saved_auxv;
5077 abi_ulong len = ts->info->auxv_len;
5078 char *ptr;
5079
5080 /*
5081 * The auxiliary vector is stored on the target process stack.
5082 * Read in the whole auxv vector and copy it to the file.
5083 */
5084 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5085 if (ptr != NULL) {
5086 while (len > 0) {
5087 ssize_t r;
5088 r = write(fd, ptr, len);
5089 if (r <= 0) {
5090 break;
5091 }
5092 len -= r;
5093 ptr += r;
5094 }
5095 lseek(fd, 0, SEEK_SET);
5096 unlock_user(ptr, auxv, len);
5097 }
5098
5099 return 0;
5100 }
5101
5102 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5103 {
5104 struct fake_open {
5105 const char *filename;
5106 int (*fill)(void *cpu_env, int fd);
5107 };
5108 const struct fake_open *fake_open;
5109 static const struct fake_open fakes[] = {
5110 { "/proc/self/maps", open_self_maps },
5111 { "/proc/self/stat", open_self_stat },
5112 { "/proc/self/auxv", open_self_auxv },
5113 { NULL, NULL }
5114 };
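/* Opens of the guest paths above are intercepted: the contents are
   generated into an unlinked temporary file so the guest sees an
   emulated view instead of the host's own /proc entries. */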
5115
5116 for (fake_open = fakes; fake_open->filename; fake_open++) {
5117 if (!strncmp(pathname, fake_open->filename,
5118 strlen(fake_open->filename))) {
5119 break;
5120 }
5121 }
5122
5123 if (fake_open->filename) {
5124 const char *tmpdir;
5125 char filename[PATH_MAX];
5126 int fd, r;
5127
5128 /* create a temporary file to hold the faked contents */
5129 tmpdir = getenv("TMPDIR");
5130 if (!tmpdir)
5131 tmpdir = "/tmp";
5132 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5133 fd = mkstemp(filename);
5134 if (fd < 0) {
5135 return fd;
5136 }
5137 unlink(filename);
5138
5139 if ((r = fake_open->fill(cpu_env, fd))) {
5140 close(fd);
5141 return r;
5142 }
5143 lseek(fd, 0, SEEK_SET);
5144
5145 return fd;
5146 }
5147
5148 return get_errno(open(path(pathname), flags, mode));
5149 }
5150
5151 /* do_syscall() should always have a single exit point at the end so
5152 that actions, such as logging of syscall results, can be performed.
5153 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5154 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5155 abi_long arg2, abi_long arg3, abi_long arg4,
5156 abi_long arg5, abi_long arg6, abi_long arg7,
5157 abi_long arg8)
5158 {
5159 abi_long ret;
5160 struct stat st;
5161 struct statfs stfs;
5162 void *p;
5163
5164 #ifdef DEBUG
5165 gemu_log("syscall %d", num);
5166 #endif
5167 if(do_strace)
5168 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5169
5170 switch(num) {
5171 case TARGET_NR_exit:
5172 #ifdef CONFIG_USE_NPTL
5173 /* In old applications this may be used to implement _exit(2).
5174 However, in threaded applications it is used for thread termination,
5175 and _exit_group is used for application termination.
5176 Do thread termination if we have more than one thread. */
5177 /* FIXME: This probably breaks if a signal arrives. We should probably
5178 be disabling signals. */
5179 if (first_cpu->next_cpu) {
5180 TaskState *ts;
5181 CPUArchState **lastp;
5182 CPUArchState *p;
5183
5184 cpu_list_lock();
5185 lastp = &first_cpu;
5186 p = first_cpu;
5187 while (p && p != (CPUArchState *)cpu_env) {
5188 lastp = &p->next_cpu;
5189 p = p->next_cpu;
5190 }
5191 /* If we didn't find the CPU for this thread then something is
5192 horribly wrong. */
5193 if (!p)
5194 abort();
5195 /* Remove the CPU from the list. */
5196 *lastp = p->next_cpu;
5197 cpu_list_unlock();
5198 ts = ((CPUArchState *)cpu_env)->opaque;
5199 if (ts->child_tidptr) {
5200 put_user_u32(0, ts->child_tidptr);
5201 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5202 NULL, NULL, 0);
5203 }
5204 thread_env = NULL;
5205 object_delete(OBJECT(ENV_GET_CPU(cpu_env)));
5206 g_free(ts);
5207 pthread_exit(NULL);
5208 }
5209 #endif
5210 #ifdef TARGET_GPROF
5211 _mcleanup();
5212 #endif
5213 gdb_exit(cpu_env, arg1);
5214 _exit(arg1);
5215 ret = 0; /* avoid warning */
5216 break;
5217 case TARGET_NR_read:
5218 if (arg3 == 0)
5219 ret = 0;
5220 else {
5221 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5222 goto efault;
5223 ret = get_errno(read(arg1, p, arg3));
5224 unlock_user(p, arg2, ret);
5225 }
5226 break;
5227 case TARGET_NR_write:
5228 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5229 goto efault;
5230 ret = get_errno(write(arg1, p, arg3));
5231 unlock_user(p, arg2, 0);
5232 break;
5233 case TARGET_NR_open:
5234 if (!(p = lock_user_string(arg1)))
5235 goto efault;
5236 ret = get_errno(do_open(cpu_env, p,
5237 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5238 arg3));
5239 unlock_user(p, arg1, 0);
5240 break;
5241 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5242 case TARGET_NR_openat:
5243 if (!(p = lock_user_string(arg2)))
5244 goto efault;
5245 ret = get_errno(sys_openat(arg1,
5246 path(p),
5247 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5248 arg4));
5249 unlock_user(p, arg2, 0);
5250 break;
5251 #endif
5252 case TARGET_NR_close:
5253 ret = get_errno(close(arg1));
5254 break;
5255 case TARGET_NR_brk:
5256 ret = do_brk(arg1);
5257 break;
5258 case TARGET_NR_fork:
5259 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5260 break;
5261 #ifdef TARGET_NR_waitpid
5262 case TARGET_NR_waitpid:
5263 {
5264 int status;
5265 ret = get_errno(waitpid(arg1, &status, arg3));
5266 if (!is_error(ret) && arg2 && ret
5267 && put_user_s32(host_to_target_waitstatus(status), arg2))
5268 goto efault;
5269 }
5270 break;
5271 #endif
5272 #ifdef TARGET_NR_waitid
5273 case TARGET_NR_waitid:
5274 {
5275 siginfo_t info;
5276 info.si_pid = 0;
5277 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5278 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5279 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5280 goto efault;
5281 host_to_target_siginfo(p, &info);
5282 unlock_user(p, arg3, sizeof(target_siginfo_t));
5283 }
5284 }
5285 break;
5286 #endif
5287 #ifdef TARGET_NR_creat /* not on alpha */
5288 case TARGET_NR_creat:
5289 if (!(p = lock_user_string(arg1)))
5290 goto efault;
5291 ret = get_errno(creat(p, arg2));
5292 unlock_user(p, arg1, 0);
5293 break;
5294 #endif
5295 case TARGET_NR_link:
5296 {
5297 void * p2;
5298 p = lock_user_string(arg1);
5299 p2 = lock_user_string(arg2);
5300 if (!p || !p2)
5301 ret = -TARGET_EFAULT;
5302 else
5303 ret = get_errno(link(p, p2));
5304 unlock_user(p2, arg2, 0);
5305 unlock_user(p, arg1, 0);
5306 }
5307 break;
5308 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5309 case TARGET_NR_linkat:
5310 {
5311 void * p2 = NULL;
5312 if (!arg2 || !arg4)
5313 goto efault;
5314 p = lock_user_string(arg2);
5315 p2 = lock_user_string(arg4);
5316 if (!p || !p2)
5317 ret = -TARGET_EFAULT;
5318 else
5319 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
5320 unlock_user(p, arg2, 0);
5321 unlock_user(p2, arg4, 0);
5322 }
5323 break;
5324 #endif
5325 case TARGET_NR_unlink:
5326 if (!(p = lock_user_string(arg1)))
5327 goto efault;
5328 ret = get_errno(unlink(p));
5329 unlock_user(p, arg1, 0);
5330 break;
5331 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5332 case TARGET_NR_unlinkat:
5333 if (!(p = lock_user_string(arg2)))
5334 goto efault;
5335 ret = get_errno(sys_unlinkat(arg1, p, arg3));
5336 unlock_user(p, arg2, 0);
5337 break;
5338 #endif
5339 case TARGET_NR_execve:
5340 {
5341 char **argp, **envp;
5342 int argc, envc;
5343 abi_ulong gp;
5344 abi_ulong guest_argp;
5345 abi_ulong guest_envp;
5346 abi_ulong addr;
5347 char **q;
5348 int total_size = 0;
5349
5350 argc = 0;
5351 guest_argp = arg2;
5352 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5353 if (get_user_ual(addr, gp))
5354 goto efault;
5355 if (!addr)
5356 break;
5357 argc++;
5358 }
5359 envc = 0;
5360 guest_envp = arg3;
5361 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5362 if (get_user_ual(addr, gp))
5363 goto efault;
5364 if (!addr)
5365 break;
5366 envc++;
5367 }
5368
5369 argp = alloca((argc + 1) * sizeof(void *));
5370 envp = alloca((envc + 1) * sizeof(void *));
5371
5372 for (gp = guest_argp, q = argp; gp;
5373 gp += sizeof(abi_ulong), q++) {
5374 if (get_user_ual(addr, gp))
5375 goto execve_efault;
5376 if (!addr)
5377 break;
5378 if (!(*q = lock_user_string(addr)))
5379 goto execve_efault;
5380 total_size += strlen(*q) + 1;
5381 }
5382 *q = NULL;
5383
5384 for (gp = guest_envp, q = envp; gp;
5385 gp += sizeof(abi_ulong), q++) {
5386 if (get_user_ual(addr, gp))
5387 goto execve_efault;
5388 if (!addr)
5389 break;
5390 if (!(*q = lock_user_string(addr)))
5391 goto execve_efault;
5392 total_size += strlen(*q) + 1;
5393 }
5394 *q = NULL;
5395
5396 /* This case will not be caught by the host's execve() if its
5397 page size is bigger than the target's. */
5398 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5399 ret = -TARGET_E2BIG;
5400 goto execve_end;
5401 }
5402 if (!(p = lock_user_string(arg1)))
5403 goto execve_efault;
5404 ret = get_errno(execve(p, argp, envp));
5405 unlock_user(p, arg1, 0);
5406
5407 goto execve_end;
5408
5409 execve_efault:
5410 ret = -TARGET_EFAULT;
5411
5412 execve_end:
5413 for (gp = guest_argp, q = argp; *q;
5414 gp += sizeof(abi_ulong), q++) {
5415 if (get_user_ual(addr, gp)
5416 || !addr)
5417 break;
5418 unlock_user(*q, addr, 0);
5419 }
5420 for (gp = guest_envp, q = envp; *q;
5421 gp += sizeof(abi_ulong), q++) {
5422 if (get_user_ual(addr, gp)
5423 || !addr)
5424 break;
5425 unlock_user(*q, addr, 0);
5426 }
5427 }
5428 break;
5429 case TARGET_NR_chdir:
5430 if (!(p = lock_user_string(arg1)))
5431 goto efault;
5432 ret = get_errno(chdir(p));
5433 unlock_user(p, arg1, 0);
5434 break;
5435 #ifdef TARGET_NR_time
5436 case TARGET_NR_time:
5437 {
5438 time_t host_time;
5439 ret = get_errno(time(&host_time));
5440 if (!is_error(ret)
5441 && arg1
5442 && put_user_sal(host_time, arg1))
5443 goto efault;
5444 }
5445 break;
5446 #endif
5447 case TARGET_NR_mknod:
5448 if (!(p = lock_user_string(arg1)))
5449 goto efault;
5450 ret = get_errno(mknod(p, arg2, arg3));
5451 unlock_user(p, arg1, 0);
5452 break;
5453 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5454 case TARGET_NR_mknodat:
5455 if (!(p = lock_user_string(arg2)))
5456 goto efault;
5457 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5458 unlock_user(p, arg2, 0);
5459 break;
5460 #endif
5461 case TARGET_NR_chmod:
5462 if (!(p = lock_user_string(arg1)))
5463 goto efault;
5464 ret = get_errno(chmod(p, arg2));
5465 unlock_user(p, arg1, 0);
5466 break;
5467 #ifdef TARGET_NR_break
5468 case TARGET_NR_break:
5469 goto unimplemented;
5470 #endif
5471 #ifdef TARGET_NR_oldstat
5472 case TARGET_NR_oldstat:
5473 goto unimplemented;
5474 #endif
5475 case TARGET_NR_lseek:
5476 ret = get_errno(lseek(arg1, arg2, arg3));
5477 break;
5478 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5479 /* Alpha specific */
5480 case TARGET_NR_getxpid:
5481 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5482 ret = get_errno(getpid());
5483 break;
5484 #endif
5485 #ifdef TARGET_NR_getpid
5486 case TARGET_NR_getpid:
5487 ret = get_errno(getpid());
5488 break;
5489 #endif
5490 case TARGET_NR_mount:
5491 {
5492 /* need to look at the data field */
5493 void *p2, *p3;
5494 p = lock_user_string(arg1);
5495 p2 = lock_user_string(arg2);
5496 p3 = lock_user_string(arg3);
5497 if (!p || !p2 || !p3)
5498 ret = -TARGET_EFAULT;
5499 else {
5500 /* FIXME - arg5 should be locked, but it isn't clear how to
5501 * do that since it's not guaranteed to be a NULL-terminated
5502 * string.
5503 */
5504 if ( ! arg5 )
5505 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5506 else
5507 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5508 }
5509 unlock_user(p, arg1, 0);
5510 unlock_user(p2, arg2, 0);
5511 unlock_user(p3, arg3, 0);
5512 break;
5513 }
5514 #ifdef TARGET_NR_umount
5515 case TARGET_NR_umount:
5516 if (!(p = lock_user_string(arg1)))
5517 goto efault;
5518 ret = get_errno(umount(p));
5519 unlock_user(p, arg1, 0);
5520 break;
5521 #endif
5522 #ifdef TARGET_NR_stime /* not on alpha */
5523 case TARGET_NR_stime:
5524 {
5525 time_t host_time;
5526 if (get_user_sal(host_time, arg1))
5527 goto efault;
5528 ret = get_errno(stime(&host_time));
5529 }
5530 break;
5531 #endif
5532 case TARGET_NR_ptrace:
5533 goto unimplemented;
5534 #ifdef TARGET_NR_alarm /* not on alpha */
5535 case TARGET_NR_alarm:
5536 ret = alarm(arg1);
5537 break;
5538 #endif
5539 #ifdef TARGET_NR_oldfstat
5540 case TARGET_NR_oldfstat:
5541 goto unimplemented;
5542 #endif
5543 #ifdef TARGET_NR_pause /* not on alpha */
5544 case TARGET_NR_pause:
5545 ret = get_errno(pause());
5546 break;
5547 #endif
5548 #ifdef TARGET_NR_utime
5549 case TARGET_NR_utime:
5550 {
5551 struct utimbuf tbuf, *host_tbuf;
5552 struct target_utimbuf *target_tbuf;
5553 if (arg2) {
5554 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5555 goto efault;
5556 tbuf.actime = tswapal(target_tbuf->actime);
5557 tbuf.modtime = tswapal(target_tbuf->modtime);
5558 unlock_user_struct(target_tbuf, arg2, 0);
5559 host_tbuf = &tbuf;
5560 } else {
5561 host_tbuf = NULL;
5562 }
5563 if (!(p = lock_user_string(arg1)))
5564 goto efault;
5565 ret = get_errno(utime(p, host_tbuf));
5566 unlock_user(p, arg1, 0);
5567 }
5568 break;
5569 #endif
5570 case TARGET_NR_utimes:
5571 {
5572 struct timeval *tvp, tv[2];
5573 if (arg2) {
5574 if (copy_from_user_timeval(&tv[0], arg2)
5575 || copy_from_user_timeval(&tv[1],
5576 arg2 + sizeof(struct target_timeval)))
5577 goto efault;
5578 tvp = tv;
5579 } else {
5580 tvp = NULL;
5581 }
5582 if (!(p = lock_user_string(arg1)))
5583 goto efault;
5584 ret = get_errno(utimes(p, tvp));
5585 unlock_user(p, arg1, 0);
5586 }
5587 break;
5588 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5589 case TARGET_NR_futimesat:
5590 {
5591 struct timeval *tvp, tv[2];
5592 if (arg3) {
5593 if (copy_from_user_timeval(&tv[0], arg3)
5594 || copy_from_user_timeval(&tv[1],
5595 arg3 + sizeof(struct target_timeval)))
5596 goto efault;
5597 tvp = tv;
5598 } else {
5599 tvp = NULL;
5600 }
5601 if (!(p = lock_user_string(arg2)))
5602 goto efault;
5603 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5604 unlock_user(p, arg2, 0);
5605 }
5606 break;
5607 #endif
5608 #ifdef TARGET_NR_stty
5609 case TARGET_NR_stty:
5610 goto unimplemented;
5611 #endif
5612 #ifdef TARGET_NR_gtty
5613 case TARGET_NR_gtty:
5614 goto unimplemented;
5615 #endif
5616 case TARGET_NR_access:
5617 if (!(p = lock_user_string(arg1)))
5618 goto efault;
5619 ret = get_errno(access(path(p), arg2));
5620 unlock_user(p, arg1, 0);
5621 break;
5622 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5623 case TARGET_NR_faccessat:
5624 if (!(p = lock_user_string(arg2)))
5625 goto efault;
5626 ret = get_errno(sys_faccessat(arg1, p, arg3));
5627 unlock_user(p, arg2, 0);
5628 break;
5629 #endif
5630 #ifdef TARGET_NR_nice /* not on alpha */
5631 case TARGET_NR_nice:
5632 ret = get_errno(nice(arg1));
5633 break;
5634 #endif
5635 #ifdef TARGET_NR_ftime
5636 case TARGET_NR_ftime:
5637 goto unimplemented;
5638 #endif
5639 case TARGET_NR_sync:
5640 sync();
5641 ret = 0;
5642 break;
5643 case TARGET_NR_kill:
5644 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5645 break;
5646 case TARGET_NR_rename:
5647 {
5648 void *p2;
5649 p = lock_user_string(arg1);
5650 p2 = lock_user_string(arg2);
5651 if (!p || !p2)
5652 ret = -TARGET_EFAULT;
5653 else
5654 ret = get_errno(rename(p, p2));
5655 unlock_user(p2, arg2, 0);
5656 unlock_user(p, arg1, 0);
5657 }
5658 break;
5659 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5660 case TARGET_NR_renameat:
5661 {
5662 void *p2;
5663 p = lock_user_string(arg2);
5664 p2 = lock_user_string(arg4);
5665 if (!p || !p2)
5666 ret = -TARGET_EFAULT;
5667 else
5668 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5669 unlock_user(p2, arg4, 0);
5670 unlock_user(p, arg2, 0);
5671 }
5672 break;
5673 #endif
5674 case TARGET_NR_mkdir:
5675 if (!(p = lock_user_string(arg1)))
5676 goto efault;
5677 ret = get_errno(mkdir(p, arg2));
5678 unlock_user(p, arg1, 0);
5679 break;
5680 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5681 case TARGET_NR_mkdirat:
5682 if (!(p = lock_user_string(arg2)))
5683 goto efault;
5684 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5685 unlock_user(p, arg2, 0);
5686 break;
5687 #endif
5688 case TARGET_NR_rmdir:
5689 if (!(p = lock_user_string(arg1)))
5690 goto efault;
5691 ret = get_errno(rmdir(p));
5692 unlock_user(p, arg1, 0);
5693 break;
5694 case TARGET_NR_dup:
5695 ret = get_errno(dup(arg1));
5696 break;
5697 case TARGET_NR_pipe:
5698 ret = do_pipe(cpu_env, arg1, 0, 0);
5699 break;
5700 #ifdef TARGET_NR_pipe2
5701 case TARGET_NR_pipe2:
5702 ret = do_pipe(cpu_env, arg1,
5703 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5704 break;
5705 #endif
5706 case TARGET_NR_times:
5707 {
5708 struct target_tms *tmsp;
5709 struct tms tms;
5710 ret = get_errno(times(&tms));
5711 if (arg1) {
5712 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5713 if (!tmsp)
5714 goto efault;
5715 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5716 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5717 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5718 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5719 }
5720 if (!is_error(ret))
5721 ret = host_to_target_clock_t(ret);
5722 }
5723 break;
5724 #ifdef TARGET_NR_prof
5725 case TARGET_NR_prof:
5726 goto unimplemented;
5727 #endif
5728 #ifdef TARGET_NR_signal
5729 case TARGET_NR_signal:
5730 goto unimplemented;
5731 #endif
5732 case TARGET_NR_acct:
5733 if (arg1 == 0) {
5734 ret = get_errno(acct(NULL));
5735 } else {
5736 if (!(p = lock_user_string(arg1)))
5737 goto efault;
5738 ret = get_errno(acct(path(p)));
5739 unlock_user(p, arg1, 0);
5740 }
5741 break;
5742 #ifdef TARGET_NR_umount2 /* not on alpha */
5743 case TARGET_NR_umount2:
5744 if (!(p = lock_user_string(arg1)))
5745 goto efault;
5746 ret = get_errno(umount2(p, arg2));
5747 unlock_user(p, arg1, 0);
5748 break;
5749 #endif
5750 #ifdef TARGET_NR_lock
5751 case TARGET_NR_lock:
5752 goto unimplemented;
5753 #endif
5754 case TARGET_NR_ioctl:
5755 ret = do_ioctl(arg1, arg2, arg3);
5756 break;
5757 case TARGET_NR_fcntl:
5758 ret = do_fcntl(arg1, arg2, arg3);
5759 break;
5760 #ifdef TARGET_NR_mpx
5761 case TARGET_NR_mpx:
5762 goto unimplemented;
5763 #endif
5764 case TARGET_NR_setpgid:
5765 ret = get_errno(setpgid(arg1, arg2));
5766 break;
5767 #ifdef TARGET_NR_ulimit
5768 case TARGET_NR_ulimit:
5769 goto unimplemented;
5770 #endif
5771 #ifdef TARGET_NR_oldolduname
5772 case TARGET_NR_oldolduname:
5773 goto unimplemented;
5774 #endif
5775 case TARGET_NR_umask:
5776 ret = get_errno(umask(arg1));
5777 break;
5778 case TARGET_NR_chroot:
5779 if (!(p = lock_user_string(arg1)))
5780 goto efault;
5781 ret = get_errno(chroot(p));
5782 unlock_user(p, arg1, 0);
5783 break;
5784 case TARGET_NR_ustat:
5785 goto unimplemented;
5786 case TARGET_NR_dup2:
5787 ret = get_errno(dup2(arg1, arg2));
5788 break;
5789 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5790 case TARGET_NR_dup3:
5791 ret = get_errno(dup3(arg1, arg2, arg3));
5792 break;
5793 #endif
5794 #ifdef TARGET_NR_getppid /* not on alpha */
5795 case TARGET_NR_getppid:
5796 ret = get_errno(getppid());
5797 break;
5798 #endif
5799 case TARGET_NR_getpgrp:
5800 ret = get_errno(getpgrp());
5801 break;
5802 case TARGET_NR_setsid:
5803 ret = get_errno(setsid());
5804 break;
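/* Old-style sigaction: the guest's target_old_sigaction (single-word mask,
 * with separate layouts for Alpha and MIPS below) is converted into a full
 * target_sigaction before calling the common do_sigaction(), and the previous
 * action is converted back the same way. */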
5805 #ifdef TARGET_NR_sigaction
5806 case TARGET_NR_sigaction:
5807 {
5808 #if defined(TARGET_ALPHA)
5809 struct target_sigaction act, oact, *pact = 0;
5810 struct target_old_sigaction *old_act;
5811 if (arg2) {
5812 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5813 goto efault;
5814 act._sa_handler = old_act->_sa_handler;
5815 target_siginitset(&act.sa_mask, old_act->sa_mask);
5816 act.sa_flags = old_act->sa_flags;
5817 act.sa_restorer = 0;
5818 unlock_user_struct(old_act, arg2, 0);
5819 pact = &act;
5820 }
5821 ret = get_errno(do_sigaction(arg1, pact, &oact));
5822 if (!is_error(ret) && arg3) {
5823 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5824 goto efault;
5825 old_act->_sa_handler = oact._sa_handler;
5826 old_act->sa_mask = oact.sa_mask.sig[0];
5827 old_act->sa_flags = oact.sa_flags;
5828 unlock_user_struct(old_act, arg3, 1);
5829 }
5830 #elif defined(TARGET_MIPS)
5831 struct target_sigaction act, oact, *pact, *old_act;
5832
5833 if (arg2) {
5834 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5835 goto efault;
5836 act._sa_handler = old_act->_sa_handler;
5837 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5838 act.sa_flags = old_act->sa_flags;
5839 unlock_user_struct(old_act, arg2, 0);
5840 pact = &act;
5841 } else {
5842 pact = NULL;
5843 }
5844
5845 ret = get_errno(do_sigaction(arg1, pact, &oact));
5846
5847 if (!is_error(ret) && arg3) {
5848 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5849 goto efault;
5850 old_act->_sa_handler = oact._sa_handler;
5851 old_act->sa_flags = oact.sa_flags;
5852 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5853 old_act->sa_mask.sig[1] = 0;
5854 old_act->sa_mask.sig[2] = 0;
5855 old_act->sa_mask.sig[3] = 0;
5856 unlock_user_struct(old_act, arg3, 1);
5857 }
5858 #else
5859 struct target_old_sigaction *old_act;
5860 struct target_sigaction act, oact, *pact;
5861 if (arg2) {
5862 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5863 goto efault;
5864 act._sa_handler = old_act->_sa_handler;
5865 target_siginitset(&act.sa_mask, old_act->sa_mask);
5866 act.sa_flags = old_act->sa_flags;
5867 act.sa_restorer = old_act->sa_restorer;
5868 unlock_user_struct(old_act, arg2, 0);
5869 pact = &act;
5870 } else {
5871 pact = NULL;
5872 }
5873 ret = get_errno(do_sigaction(arg1, pact, &oact));
5874 if (!is_error(ret) && arg3) {
5875 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5876 goto efault;
5877 old_act->_sa_handler = oact._sa_handler;
5878 old_act->sa_mask = oact.sa_mask.sig[0];
5879 old_act->sa_flags = oact.sa_flags;
5880 old_act->sa_restorer = oact.sa_restorer;
5881 unlock_user_struct(old_act, arg3, 1);
5882 }
5883 #endif
5884 }
5885 break;
5886 #endif
5887 case TARGET_NR_rt_sigaction:
5888 {
5889 #if defined(TARGET_ALPHA)
5890 struct target_sigaction act, oact, *pact = 0;
5891 struct target_rt_sigaction *rt_act;
5892 /* ??? arg4 == sizeof(sigset_t). */
5893 if (arg2) {
5894 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5895 goto efault;
5896 act._sa_handler = rt_act->_sa_handler;
5897 act.sa_mask = rt_act->sa_mask;
5898 act.sa_flags = rt_act->sa_flags;
5899 act.sa_restorer = arg5;
5900 unlock_user_struct(rt_act, arg2, 0);
5901 pact = &act;
5902 }
5903 ret = get_errno(do_sigaction(arg1, pact, &oact));
5904 if (!is_error(ret) && arg3) {
5905 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5906 goto efault;
5907 rt_act->_sa_handler = oact._sa_handler;
5908 rt_act->sa_mask = oact.sa_mask;
5909 rt_act->sa_flags = oact.sa_flags;
5910 unlock_user_struct(rt_act, arg3, 1);
5911 }
5912 #else
5913 struct target_sigaction *act;
5914 struct target_sigaction *oact;
5915
5916 if (arg2) {
5917 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5918 goto efault;
5919 } else
5920 act = NULL;
5921 if (arg3) {
5922 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5923 ret = -TARGET_EFAULT;
5924 goto rt_sigaction_fail;
5925 }
5926 } else
5927 oact = NULL;
5928 ret = get_errno(do_sigaction(arg1, act, oact));
5929 rt_sigaction_fail:
5930 if (act)
5931 unlock_user_struct(act, arg2, 0);
5932 if (oact)
5933 unlock_user_struct(oact, arg3, 1);
5934 #endif
5935 }
5936 break;
5937 #ifdef TARGET_NR_sgetmask /* not on alpha */
5938 case TARGET_NR_sgetmask:
5939 {
5940 sigset_t cur_set;
5941 abi_ulong target_set;
5942 sigprocmask(0, NULL, &cur_set);
5943 host_to_target_old_sigset(&target_set, &cur_set);
5944 ret = target_set;
5945 }
5946 break;
5947 #endif
5948 #ifdef TARGET_NR_ssetmask /* not on alpha */
5949 case TARGET_NR_ssetmask:
5950 {
5951 sigset_t set, oset, cur_set;
5952 abi_ulong target_set = arg1;
5953 sigprocmask(0, NULL, &cur_set);
5954 target_to_host_old_sigset(&set, &target_set);
5955 sigorset(&set, &set, &cur_set);
5956 sigprocmask(SIG_SETMASK, &set, &oset);
5957 host_to_target_old_sigset(&target_set, &oset);
5958 ret = target_set;
5959 }
5960 break;
5961 #endif
5962 #ifdef TARGET_NR_sigprocmask
5963 case TARGET_NR_sigprocmask:
5964 {
5965 #if defined(TARGET_ALPHA)
5966 sigset_t set, oldset;
5967 abi_ulong mask;
5968 int how;
5969
5970 switch (arg1) {
5971 case TARGET_SIG_BLOCK:
5972 how = SIG_BLOCK;
5973 break;
5974 case TARGET_SIG_UNBLOCK:
5975 how = SIG_UNBLOCK;
5976 break;
5977 case TARGET_SIG_SETMASK:
5978 how = SIG_SETMASK;
5979 break;
5980 default:
5981 ret = -TARGET_EINVAL;
5982 goto fail;
5983 }
5984 mask = arg2;
5985 target_to_host_old_sigset(&set, &mask);
5986
5987 ret = get_errno(sigprocmask(how, &set, &oldset));
5988 if (!is_error(ret)) {
5989 host_to_target_old_sigset(&mask, &oldset);
5990 ret = mask;
5991 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5992 }
5993 #else
5994 sigset_t set, oldset, *set_ptr;
5995 int how;
5996
5997 if (arg2) {
5998 switch (arg1) {
5999 case TARGET_SIG_BLOCK:
6000 how = SIG_BLOCK;
6001 break;
6002 case TARGET_SIG_UNBLOCK:
6003 how = SIG_UNBLOCK;
6004 break;
6005 case TARGET_SIG_SETMASK:
6006 how = SIG_SETMASK;
6007 break;
6008 default:
6009 ret = -TARGET_EINVAL;
6010 goto fail;
6011 }
6012 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6013 goto efault;
6014 target_to_host_old_sigset(&set, p);
6015 unlock_user(p, arg2, 0);
6016 set_ptr = &set;
6017 } else {
6018 how = 0;
6019 set_ptr = NULL;
6020 }
6021 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6022 if (!is_error(ret) && arg3) {
6023 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6024 goto efault;
6025 host_to_target_old_sigset(p, &oldset);
6026 unlock_user(p, arg3, sizeof(target_sigset_t));
6027 }
6028 #endif
6029 }
6030 break;
6031 #endif
6032 case TARGET_NR_rt_sigprocmask:
6033 {
6034 int how = arg1;
6035 sigset_t set, oldset, *set_ptr;
6036
6037 if (arg2) {
6038 switch(how) {
6039 case TARGET_SIG_BLOCK:
6040 how = SIG_BLOCK;
6041 break;
6042 case TARGET_SIG_UNBLOCK:
6043 how = SIG_UNBLOCK;
6044 break;
6045 case TARGET_SIG_SETMASK:
6046 how = SIG_SETMASK;
6047 break;
6048 default:
6049 ret = -TARGET_EINVAL;
6050 goto fail;
6051 }
6052 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6053 goto efault;
6054 target_to_host_sigset(&set, p);
6055 unlock_user(p, arg2, 0);
6056 set_ptr = &set;
6057 } else {
6058 how = 0;
6059 set_ptr = NULL;
6060 }
6061 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6062 if (!is_error(ret) && arg3) {
6063 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6064 goto efault;
6065 host_to_target_sigset(p, &oldset);
6066 unlock_user(p, arg3, sizeof(target_sigset_t));
6067 }
6068 }
6069 break;
6070 #ifdef TARGET_NR_sigpending
6071 case TARGET_NR_sigpending:
6072 {
6073 sigset_t set;
6074 ret = get_errno(sigpending(&set));
6075 if (!is_error(ret)) {
6076 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6077 goto efault;
6078 host_to_target_old_sigset(p, &set);
6079 unlock_user(p, arg1, sizeof(target_sigset_t));
6080 }
6081 }
6082 break;
6083 #endif
6084 case TARGET_NR_rt_sigpending:
6085 {
6086 sigset_t set;
6087 ret = get_errno(sigpending(&set));
6088 if (!is_error(ret)) {
6089 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6090 goto efault;
6091 host_to_target_sigset(p, &set);
6092 unlock_user(p, arg1, sizeof(target_sigset_t));
6093 }
6094 }
6095 break;
6096 #ifdef TARGET_NR_sigsuspend
6097 case TARGET_NR_sigsuspend:
6098 {
6099 sigset_t set;
6100 #if defined(TARGET_ALPHA)
6101 abi_ulong mask = arg1;
6102 target_to_host_old_sigset(&set, &mask);
6103 #else
6104 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6105 goto efault;
6106 target_to_host_old_sigset(&set, p);
6107 unlock_user(p, arg1, 0);
6108 #endif
6109 ret = get_errno(sigsuspend(&set));
6110 }
6111 break;
6112 #endif
6113 case TARGET_NR_rt_sigsuspend:
6114 {
6115 sigset_t set;
6116 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6117 goto efault;
6118 target_to_host_sigset(&set, p);
6119 unlock_user(p, arg1, 0);
6120 ret = get_errno(sigsuspend(&set));
6121 }
6122 break;
6123 case TARGET_NR_rt_sigtimedwait:
6124 {
6125 sigset_t set;
6126 struct timespec uts, *puts;
6127 siginfo_t uinfo;
6128
6129 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6130 goto efault;
6131 target_to_host_sigset(&set, p);
6132 unlock_user(p, arg1, 0);
6133 if (arg3) {
6134 puts = &uts;
6135 target_to_host_timespec(puts, arg3);
6136 } else {
6137 puts = NULL;
6138 }
6139 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6140 if (!is_error(ret) && arg2) {
6141 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6142 goto efault;
6143 host_to_target_siginfo(p, &uinfo);
6144 unlock_user(p, arg2, sizeof(target_siginfo_t));
6145 }
6146 }
6147 break;
6148 case TARGET_NR_rt_sigqueueinfo:
6149 {
6150 siginfo_t uinfo;
6151 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6152 goto efault;
6153 target_to_host_siginfo(&uinfo, p);
6154 unlock_user(p, arg3, 0);
6155 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6156 }
6157 break;
6158 #ifdef TARGET_NR_sigreturn
6159 case TARGET_NR_sigreturn:
6160 /* NOTE: ret is eax, so no transcoding is needed */
6161 ret = do_sigreturn(cpu_env);
6162 break;
6163 #endif
6164 case TARGET_NR_rt_sigreturn:
6165 /* NOTE: ret is eax, so no transcoding is needed */
6166 ret = do_rt_sigreturn(cpu_env);
6167 break;
6168 case TARGET_NR_sethostname:
6169 if (!(p = lock_user_string(arg1)))
6170 goto efault;
6171 ret = get_errno(sethostname(p, arg2));
6172 unlock_user(p, arg1, 0);
6173 break;
6174 case TARGET_NR_setrlimit:
6175 {
6176 int resource = target_to_host_resource(arg1);
6177 struct target_rlimit *target_rlim;
6178 struct rlimit rlim;
6179 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6180 goto efault;
6181 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6182 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6183 unlock_user_struct(target_rlim, arg2, 0);
6184 ret = get_errno(setrlimit(resource, &rlim));
6185 }
6186 break;
6187 case TARGET_NR_getrlimit:
6188 {
6189 int resource = target_to_host_resource(arg1);
6190 struct target_rlimit *target_rlim;
6191 struct rlimit rlim;
6192
6193 ret = get_errno(getrlimit(resource, &rlim));
6194 if (!is_error(ret)) {
6195 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6196 goto efault;
6197 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6198 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6199 unlock_user_struct(target_rlim, arg2, 1);
6200 }
6201 }
6202 break;
6203 case TARGET_NR_getrusage:
6204 {
6205 struct rusage rusage;
6206 ret = get_errno(getrusage(arg1, &rusage));
6207 if (!is_error(ret)) {
6208 host_to_target_rusage(arg2, &rusage);
6209 }
6210 }
6211 break;
6212 case TARGET_NR_gettimeofday:
6213 {
6214 struct timeval tv;
6215 ret = get_errno(gettimeofday(&tv, NULL));
6216 if (!is_error(ret)) {
6217 if (copy_to_user_timeval(arg1, &tv))
6218 goto efault;
6219 }
6220 }
6221 break;
6222 case TARGET_NR_settimeofday:
6223 {
6224 struct timeval tv;
6225 if (copy_from_user_timeval(&tv, arg1))
6226 goto efault;
6227 ret = get_errno(settimeofday(&tv, NULL));
6228 }
6229 break;
6230 #if defined(TARGET_NR_select)
6231 case TARGET_NR_select:
6232 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6233 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6234 #else
6235 {
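/* Old-style select ABI: a single guest pointer to a block holding n, the
 * three fd_set addresses and the timeval address, unpacked here before
 * reusing the common do_select() helper. */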
6236 struct target_sel_arg_struct *sel;
6237 abi_ulong inp, outp, exp, tvp;
6238 long nsel;
6239
6240 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6241 goto efault;
6242 nsel = tswapal(sel->n);
6243 inp = tswapal(sel->inp);
6244 outp = tswapal(sel->outp);
6245 exp = tswapal(sel->exp);
6246 tvp = tswapal(sel->tvp);
6247 unlock_user_struct(sel, arg1, 0);
6248 ret = do_select(nsel, inp, outp, exp, tvp);
6249 }
6250 #endif
6251 break;
6252 #endif
6253 #ifdef TARGET_NR_pselect6
6254 case TARGET_NR_pselect6:
6255 {
6256 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6257 fd_set rfds, wfds, efds;
6258 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6259 struct timespec ts, *ts_ptr;
6260
6261 /*
6262 * The 6th arg is actually two args smashed together,
6263 * so we cannot use the C library.
6264 */
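/* On the guest side arg6 points at two abi_ulongs: the address of the
 * sigset and its size. They are unpacked below into the host-side
 * { sigset_t *set; size_t size; } pair handed to sys_pselect6(). */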
6265 sigset_t set;
6266 struct {
6267 sigset_t *set;
6268 size_t size;
6269 } sig, *sig_ptr;
6270
6271 abi_ulong arg_sigset, arg_sigsize, *arg7;
6272 target_sigset_t *target_sigset;
6273
6274 n = arg1;
6275 rfd_addr = arg2;
6276 wfd_addr = arg3;
6277 efd_addr = arg4;
6278 ts_addr = arg5;
6279
6280 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6281 if (ret) {
6282 goto fail;
6283 }
6284 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6285 if (ret) {
6286 goto fail;
6287 }
6288 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6289 if (ret) {
6290 goto fail;
6291 }
6292
6293 /*
6294 * This takes a timespec, and not a timeval, so we cannot
6295 * use the do_select() helper ...
6296 */
6297 if (ts_addr) {
6298 if (target_to_host_timespec(&ts, ts_addr)) {
6299 goto efault;
6300 }
6301 ts_ptr = &ts;
6302 } else {
6303 ts_ptr = NULL;
6304 }
6305
6306 /* Extract the two packed args for the sigset */
6307 if (arg6) {
6308 sig_ptr = &sig;
6309 sig.size = _NSIG / 8;
6310
6311 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6312 if (!arg7) {
6313 goto efault;
6314 }
6315 arg_sigset = tswapal(arg7[0]);
6316 arg_sigsize = tswapal(arg7[1]);
6317 unlock_user(arg7, arg6, 0);
6318
6319 if (arg_sigset) {
6320 sig.set = &set;
6321 if (arg_sigsize != sizeof(*target_sigset)) {
6322 /* Like the kernel, we enforce correct size sigsets */
6323 ret = -TARGET_EINVAL;
6324 goto fail;
6325 }
6326 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6327 sizeof(*target_sigset), 1);
6328 if (!target_sigset) {
6329 goto efault;
6330 }
6331 target_to_host_sigset(&set, target_sigset);
6332 unlock_user(target_sigset, arg_sigset, 0);
6333 } else {
6334 sig.set = NULL;
6335 }
6336 } else {
6337 sig_ptr = NULL;
6338 }
6339
6340 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6341 ts_ptr, sig_ptr));
6342
6343 if (!is_error(ret)) {
6344 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6345 goto efault;
6346 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6347 goto efault;
6348 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6349 goto efault;
6350
6351 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6352 goto efault;
6353 }
6354 }
6355 break;
6356 #endif
6357 case TARGET_NR_symlink:
6358 {
6359 void *p2;
6360 p = lock_user_string(arg1);
6361 p2 = lock_user_string(arg2);
6362 if (!p || !p2)
6363 ret = -TARGET_EFAULT;
6364 else
6365 ret = get_errno(symlink(p, p2));
6366 unlock_user(p2, arg2, 0);
6367 unlock_user(p, arg1, 0);
6368 }
6369 break;
6370 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6371 case TARGET_NR_symlinkat:
6372 {
6373 void *p2;
6374 p = lock_user_string(arg1);
6375 p2 = lock_user_string(arg3);
6376 if (!p || !p2)
6377 ret = -TARGET_EFAULT;
6378 else
6379 ret = get_errno(sys_symlinkat(p, arg2, p2));
6380 unlock_user(p2, arg3, 0);
6381 unlock_user(p, arg1, 0);
6382 }
6383 break;
6384 #endif
6385 #ifdef TARGET_NR_oldlstat
6386 case TARGET_NR_oldlstat:
6387 goto unimplemented;
6388 #endif
6389 case TARGET_NR_readlink:
6390 {
6391 void *p2, *temp;
6392 p = lock_user_string(arg1);
6393 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6394 if (!p || !p2)
6395 ret = -TARGET_EFAULT;
6396 else {
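/* Under emulation /proc/self/exe names the qemu binary, so the request is
 * intercepted and answered with the resolved path of the emulated
 * executable (exec_path) instead. */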
6397 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
6398 char real[PATH_MAX];
6399 temp = realpath(exec_path,real);
6400 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
6401 snprintf((char *)p2, arg3, "%s", real);
6402 }
6403 else
6404 ret = get_errno(readlink(path(p), p2, arg3));
6405 }
6406 unlock_user(p2, arg2, ret);
6407 unlock_user(p, arg1, 0);
6408 }
6409 break;
6410 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6411 case TARGET_NR_readlinkat:
6412 {
6413 void *p2;
6414 p = lock_user_string(arg2);
6415 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6416 if (!p || !p2)
6417 ret = -TARGET_EFAULT;
6418 else
6419 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6420 unlock_user(p2, arg3, ret);
6421 unlock_user(p, arg2, 0);
6422 }
6423 break;
6424 #endif
6425 #ifdef TARGET_NR_uselib
6426 case TARGET_NR_uselib:
6427 goto unimplemented;
6428 #endif
6429 #ifdef TARGET_NR_swapon
6430 case TARGET_NR_swapon:
6431 if (!(p = lock_user_string(arg1)))
6432 goto efault;
6433 ret = get_errno(swapon(p, arg2));
6434 unlock_user(p, arg1, 0);
6435 break;
6436 #endif
6437 case TARGET_NR_reboot:
6438 if (!(p = lock_user_string(arg4)))
6439 goto efault;
6440 ret = get_errno(reboot(arg1, arg2, arg3, p));
6441 unlock_user(p, arg4, 0);
6442 break;
6443 #ifdef TARGET_NR_readdir
6444 case TARGET_NR_readdir:
6445 goto unimplemented;
6446 #endif
6447 #ifdef TARGET_NR_mmap
6448 case TARGET_NR_mmap:
6449 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6450 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6451 || defined(TARGET_S390X)
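/* Old mmap() ABI on these targets: a single guest pointer to six packed
 * abi_ulong arguments, fetched and byte-swapped individually before calling
 * target_mmap(). */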
6452 {
6453 abi_ulong *v;
6454 abi_ulong v1, v2, v3, v4, v5, v6;
6455 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6456 goto efault;
6457 v1 = tswapal(v[0]);
6458 v2 = tswapal(v[1]);
6459 v3 = tswapal(v[2]);
6460 v4 = tswapal(v[3]);
6461 v5 = tswapal(v[4]);
6462 v6 = tswapal(v[5]);
6463 unlock_user(v, arg1, 0);
6464 ret = get_errno(target_mmap(v1, v2, v3,
6465 target_to_host_bitmask(v4, mmap_flags_tbl),
6466 v5, v6));
6467 }
6468 #else
6469 ret = get_errno(target_mmap(arg1, arg2, arg3,
6470 target_to_host_bitmask(arg4, mmap_flags_tbl),
6471 arg5,
6472 arg6));
6473 #endif
6474 break;
6475 #endif
6476 #ifdef TARGET_NR_mmap2
6477 case TARGET_NR_mmap2:
6478 #ifndef MMAP_SHIFT
6479 #define MMAP_SHIFT 12
6480 #endif
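/* mmap2 passes the file offset in units of 1 << MMAP_SHIFT (4096) bytes,
 * letting 32-bit guests map at offsets beyond 4 GiB. */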
6481 ret = get_errno(target_mmap(arg1, arg2, arg3,
6482 target_to_host_bitmask(arg4, mmap_flags_tbl),
6483 arg5,
6484 arg6 << MMAP_SHIFT));
6485 break;
6486 #endif
6487 case TARGET_NR_munmap:
6488 ret = get_errno(target_munmap(arg1, arg2));
6489 break;
6490 case TARGET_NR_mprotect:
6491 {
6492 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6493 /* Special hack to detect libc making the stack executable. */
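/* The guest stack is a fixed host mapping without the kernel's grows-down
 * semantics, so a PROT_GROWSDOWN request landing inside the stack is widened
 * down to stack_limit and the flag dropped before calling target_mprotect(). */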
6494 if ((arg3 & PROT_GROWSDOWN)
6495 && arg1 >= ts->info->stack_limit
6496 && arg1 <= ts->info->start_stack) {
6497 arg3 &= ~PROT_GROWSDOWN;
6498 arg2 = arg2 + arg1 - ts->info->stack_limit;
6499 arg1 = ts->info->stack_limit;
6500 }
6501 }
6502 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6503 break;
6504 #ifdef TARGET_NR_mremap
6505 case TARGET_NR_mremap:
6506 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6507 break;
6508 #endif
6509 /* ??? msync/mlock/munlock are broken for softmmu. */
6510 #ifdef TARGET_NR_msync
6511 case TARGET_NR_msync:
6512 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6513 break;
6514 #endif
6515 #ifdef TARGET_NR_mlock
6516 case TARGET_NR_mlock:
6517 ret = get_errno(mlock(g2h(arg1), arg2));
6518 break;
6519 #endif
6520 #ifdef TARGET_NR_munlock
6521 case TARGET_NR_munlock:
6522 ret = get_errno(munlock(g2h(arg1), arg2));
6523 break;
6524 #endif
6525 #ifdef TARGET_NR_mlockall
6526 case TARGET_NR_mlockall:
6527 ret = get_errno(mlockall(arg1));
6528 break;
6529 #endif
6530 #ifdef TARGET_NR_munlockall
6531 case TARGET_NR_munlockall:
6532 ret = get_errno(munlockall());
6533 break;
6534 #endif
6535 case TARGET_NR_truncate:
6536 if (!(p = lock_user_string(arg1)))
6537 goto efault;
6538 ret = get_errno(truncate(p, arg2));
6539 unlock_user(p, arg1, 0);
6540 break;
6541 case TARGET_NR_ftruncate:
6542 ret = get_errno(ftruncate(arg1, arg2));
6543 break;
6544 case TARGET_NR_fchmod:
6545 ret = get_errno(fchmod(arg1, arg2));
6546 break;
6547 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6548 case TARGET_NR_fchmodat:
6549 if (!(p = lock_user_string(arg2)))
6550 goto efault;
6551 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6552 unlock_user(p, arg2, 0);
6553 break;
6554 #endif
6555 case TARGET_NR_getpriority:
6556 /* Note that negative values are valid for getpriority, so we must
6557 differentiate based on errno settings. */
6558 errno = 0;
6559 ret = getpriority(arg1, arg2);
6560 if (ret == -1 && errno != 0) {
6561 ret = -host_to_target_errno(errno);
6562 break;
6563 }
6564 #ifdef TARGET_ALPHA
6565 /* Return value is the unbiased priority. Signal no error. */
6566 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6567 #else
6568 /* Return value is a biased priority to avoid negative numbers. */
6569 ret = 20 - ret;
6570 #endif
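/* On most targets the raw Linux syscall returns 20 - nice (range 1..40) so
 * the value stays positive; the biasing above mirrors that and the guest's
 * libc undoes it. Alpha returns the unbiased nice value directly. */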
6571 break;
6572 case TARGET_NR_setpriority:
6573 ret = get_errno(setpriority(arg1, arg2, arg3));
6574 break;
6575 #ifdef TARGET_NR_profil
6576 case TARGET_NR_profil:
6577 goto unimplemented;
6578 #endif
6579 case TARGET_NR_statfs:
6580 if (!(p = lock_user_string(arg1)))
6581 goto efault;
6582 ret = get_errno(statfs(path(p), &stfs));
6583 unlock_user(p, arg1, 0);
6584 convert_statfs:
6585 if (!is_error(ret)) {
6586 struct target_statfs *target_stfs;
6587
6588 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6589 goto efault;
6590 __put_user(stfs.f_type, &target_stfs->f_type);
6591 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6592 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6593 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6594 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6595 __put_user(stfs.f_files, &target_stfs->f_files);
6596 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6597 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6598 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6599 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6600 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6601 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6602 unlock_user_struct(target_stfs, arg2, 1);
6603 }
6604 break;
6605 case TARGET_NR_fstatfs:
6606 ret = get_errno(fstatfs(arg1, &stfs));
6607 goto convert_statfs;
6608 #ifdef TARGET_NR_statfs64
6609 case TARGET_NR_statfs64:
6610 if (!(p = lock_user_string(arg1)))
6611 goto efault;
6612 ret = get_errno(statfs(path(p), &stfs));
6613 unlock_user(p, arg1, 0);
6614 convert_statfs64:
6615 if (!is_error(ret)) {
6616 struct target_statfs64 *target_stfs;
6617
6618 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6619 goto efault;
6620 __put_user(stfs.f_type, &target_stfs->f_type);
6621 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6622 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6623 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6624 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6625 __put_user(stfs.f_files, &target_stfs->f_files);
6626 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6627 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6628 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6629 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6630 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6631 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6632 unlock_user_struct(target_stfs, arg3, 1);
6633 }
6634 break;
6635 case TARGET_NR_fstatfs64:
6636 ret = get_errno(fstatfs(arg1, &stfs));
6637 goto convert_statfs64;
6638 #endif
6639 #ifdef TARGET_NR_ioperm
6640 case TARGET_NR_ioperm:
6641 goto unimplemented;
6642 #endif
6643 #ifdef TARGET_NR_socketcall
6644 case TARGET_NR_socketcall:
6645 ret = do_socketcall(arg1, arg2);
6646 break;
6647 #endif
6648 #ifdef TARGET_NR_accept
6649 case TARGET_NR_accept:
6650 ret = do_accept(arg1, arg2, arg3);
6651 break;
6652 #endif
6653 #ifdef TARGET_NR_bind
6654 case TARGET_NR_bind:
6655 ret = do_bind(arg1, arg2, arg3);
6656 break;
6657 #endif
6658 #ifdef TARGET_NR_connect
6659 case TARGET_NR_connect:
6660 ret = do_connect(arg1, arg2, arg3);
6661 break;
6662 #endif
6663 #ifdef TARGET_NR_getpeername
6664 case TARGET_NR_getpeername:
6665 ret = do_getpeername(arg1, arg2, arg3);
6666 break;
6667 #endif
6668 #ifdef TARGET_NR_getsockname
6669 case TARGET_NR_getsockname:
6670 ret = do_getsockname(arg1, arg2, arg3);
6671 break;
6672 #endif
6673 #ifdef TARGET_NR_getsockopt
6674 case TARGET_NR_getsockopt:
6675 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6676 break;
6677 #endif
6678 #ifdef TARGET_NR_listen
6679 case TARGET_NR_listen:
6680 ret = get_errno(listen(arg1, arg2));
6681 break;
6682 #endif
6683 #ifdef TARGET_NR_recv
6684 case TARGET_NR_recv:
6685 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6686 break;
6687 #endif
6688 #ifdef TARGET_NR_recvfrom
6689 case TARGET_NR_recvfrom:
6690 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6691 break;
6692 #endif
6693 #ifdef TARGET_NR_recvmsg
6694 case TARGET_NR_recvmsg:
6695 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6696 break;
6697 #endif
6698 #ifdef TARGET_NR_send
6699 case TARGET_NR_send:
6700 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6701 break;
6702 #endif
6703 #ifdef TARGET_NR_sendmsg
6704 case TARGET_NR_sendmsg:
6705 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6706 break;
6707 #endif
6708 #ifdef TARGET_NR_sendto
6709 case TARGET_NR_sendto:
6710 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6711 break;
6712 #endif
6713 #ifdef TARGET_NR_shutdown
6714 case TARGET_NR_shutdown:
6715 ret = get_errno(shutdown(arg1, arg2));
6716 break;
6717 #endif
6718 #ifdef TARGET_NR_socket
6719 case TARGET_NR_socket:
6720 ret = do_socket(arg1, arg2, arg3);
6721 break;
6722 #endif
6723 #ifdef TARGET_NR_socketpair
6724 case TARGET_NR_socketpair:
6725 ret = do_socketpair(arg1, arg2, arg3, arg4);
6726 break;
6727 #endif
6728 #ifdef TARGET_NR_setsockopt
6729 case TARGET_NR_setsockopt:
6730 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6731 break;
6732 #endif
6733
6734 case TARGET_NR_syslog:
6735 if (!(p = lock_user_string(arg2)))
6736 goto efault;
6737 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6738 unlock_user(p, arg2, 0);
6739 break;
6740
6741 case TARGET_NR_setitimer:
6742 {
6743 struct itimerval value, ovalue, *pvalue;
6744
6745 if (arg2) {
6746 pvalue = &value;
6747 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6748 || copy_from_user_timeval(&pvalue->it_value,
6749 arg2 + sizeof(struct target_timeval)))
6750 goto efault;
6751 } else {
6752 pvalue = NULL;
6753 }
6754 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6755 if (!is_error(ret) && arg3) {
6756 if (copy_to_user_timeval(arg3,
6757 &ovalue.it_interval)
6758 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6759 &ovalue.it_value))
6760 goto efault;
6761 }
6762 }
6763 break;
6764 case TARGET_NR_getitimer:
6765 {
6766 struct itimerval value;
6767
6768 ret = get_errno(getitimer(arg1, &value));
6769 if (!is_error(ret) && arg2) {
6770 if (copy_to_user_timeval(arg2,
6771 &value.it_interval)
6772 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6773 &value.it_value))
6774 goto efault;
6775 }
6776 }
6777 break;
6778 case TARGET_NR_stat:
6779 if (!(p = lock_user_string(arg1)))
6780 goto efault;
6781 ret = get_errno(stat(path(p), &st));
6782 unlock_user(p, arg1, 0);
6783 goto do_stat;
6784 case TARGET_NR_lstat:
6785 if (!(p = lock_user_string(arg1)))
6786 goto efault;
6787 ret = get_errno(lstat(path(p), &st));
6788 unlock_user(p, arg1, 0);
6789 goto do_stat;
6790 case TARGET_NR_fstat:
6791 {
6792 ret = get_errno(fstat(arg1, &st));
6793 do_stat:
6794 if (!is_error(ret)) {
6795 struct target_stat *target_st;
6796
6797 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6798 goto efault;
6799 memset(target_st, 0, sizeof(*target_st));
6800 __put_user(st.st_dev, &target_st->st_dev);
6801 __put_user(st.st_ino, &target_st->st_ino);
6802 __put_user(st.st_mode, &target_st->st_mode);
6803 __put_user(st.st_uid, &target_st->st_uid);
6804 __put_user(st.st_gid, &target_st->st_gid);
6805 __put_user(st.st_nlink, &target_st->st_nlink);
6806 __put_user(st.st_rdev, &target_st->st_rdev);
6807 __put_user(st.st_size, &target_st->st_size);
6808 __put_user(st.st_blksize, &target_st->st_blksize);
6809 __put_user(st.st_blocks, &target_st->st_blocks);
6810 __put_user(st.st_atime, &target_st->target_st_atime);
6811 __put_user(st.st_mtime, &target_st->target_st_mtime);
6812 __put_user(st.st_ctime, &target_st->target_st_ctime);
6813 unlock_user_struct(target_st, arg2, 1);
6814 }
6815 }
6816 break;
6817 #ifdef TARGET_NR_olduname
6818 case TARGET_NR_olduname:
6819 goto unimplemented;
6820 #endif
6821 #ifdef TARGET_NR_iopl
6822 case TARGET_NR_iopl:
6823 goto unimplemented;
6824 #endif
6825 case TARGET_NR_vhangup:
6826 ret = get_errno(vhangup());
6827 break;
6828 #ifdef TARGET_NR_idle
6829 case TARGET_NR_idle:
6830 goto unimplemented;
6831 #endif
6832 #ifdef TARGET_NR_syscall
6833 case TARGET_NR_syscall:
6834 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6835 arg6, arg7, arg8, 0);
6836 break;
6837 #endif
6838 case TARGET_NR_wait4:
6839 {
6840 int status;
6841 abi_long status_ptr = arg2;
6842 struct rusage rusage, *rusage_ptr;
6843 abi_ulong target_rusage = arg4;
6844 if (target_rusage)
6845 rusage_ptr = &rusage;
6846 else
6847 rusage_ptr = NULL;
6848 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6849 if (!is_error(ret)) {
6850 if (status_ptr && ret) {
6851 status = host_to_target_waitstatus(status);
6852 if (put_user_s32(status, status_ptr))
6853 goto efault;
6854 }
6855 if (target_rusage)
6856 host_to_target_rusage(target_rusage, &rusage);
6857 }
6858 }
6859 break;
6860 #ifdef TARGET_NR_swapoff
6861 case TARGET_NR_swapoff:
6862 if (!(p = lock_user_string(arg1)))
6863 goto efault;
6864 ret = get_errno(swapoff(p));
6865 unlock_user(p, arg1, 0);
6866 break;
6867 #endif
6868 case TARGET_NR_sysinfo:
6869 {
6870 struct target_sysinfo *target_value;
6871 struct sysinfo value;
6872 ret = get_errno(sysinfo(&value));
6873 if (!is_error(ret) && arg1)
6874 {
6875 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6876 goto efault;
6877 __put_user(value.uptime, &target_value->uptime);
6878 __put_user(value.loads[0], &target_value->loads[0]);
6879 __put_user(value.loads[1], &target_value->loads[1]);
6880 __put_user(value.loads[2], &target_value->loads[2]);
6881 __put_user(value.totalram, &target_value->totalram);
6882 __put_user(value.freeram, &target_value->freeram);
6883 __put_user(value.sharedram, &target_value->sharedram);
6884 __put_user(value.bufferram, &target_value->bufferram);
6885 __put_user(value.totalswap, &target_value->totalswap);
6886 __put_user(value.freeswap, &target_value->freeswap);
6887 __put_user(value.procs, &target_value->procs);
6888 __put_user(value.totalhigh, &target_value->totalhigh);
6889 __put_user(value.freehigh, &target_value->freehigh);
6890 __put_user(value.mem_unit, &target_value->mem_unit);
6891 unlock_user_struct(target_value, arg1, 1);
6892 }
6893 }
6894 break;
6895 #ifdef TARGET_NR_ipc
6896 case TARGET_NR_ipc:
6897 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6898 break;
6899 #endif
6900 #ifdef TARGET_NR_semget
6901 case TARGET_NR_semget:
6902 ret = get_errno(semget(arg1, arg2, arg3));
6903 break;
6904 #endif
6905 #ifdef TARGET_NR_semop
6906 case TARGET_NR_semop:
6907 ret = get_errno(do_semop(arg1, arg2, arg3));
6908 break;
6909 #endif
6910 #ifdef TARGET_NR_semctl
6911 case TARGET_NR_semctl:
6912 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6913 break;
6914 #endif
6915 #ifdef TARGET_NR_msgctl
6916 case TARGET_NR_msgctl:
6917 ret = do_msgctl(arg1, arg2, arg3);
6918 break;
6919 #endif
6920 #ifdef TARGET_NR_msgget
6921 case TARGET_NR_msgget:
6922 ret = get_errno(msgget(arg1, arg2));
6923 break;
6924 #endif
6925 #ifdef TARGET_NR_msgrcv
6926 case TARGET_NR_msgrcv:
6927 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6928 break;
6929 #endif
6930 #ifdef TARGET_NR_msgsnd
6931 case TARGET_NR_msgsnd:
6932 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6933 break;
6934 #endif
6935 #ifdef TARGET_NR_shmget
6936 case TARGET_NR_shmget:
6937 ret = get_errno(shmget(arg1, arg2, arg3));
6938 break;
6939 #endif
6940 #ifdef TARGET_NR_shmctl
6941 case TARGET_NR_shmctl:
6942 ret = do_shmctl(arg1, arg2, arg3);
6943 break;
6944 #endif
6945 #ifdef TARGET_NR_shmat
6946 case TARGET_NR_shmat:
6947 ret = do_shmat(arg1, arg2, arg3);
6948 break;
6949 #endif
6950 #ifdef TARGET_NR_shmdt
6951 case TARGET_NR_shmdt:
6952 ret = do_shmdt(arg1);
6953 break;
6954 #endif
6955 case TARGET_NR_fsync:
6956 ret = get_errno(fsync(arg1));
6957 break;
6958 case TARGET_NR_clone:
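/* The clone() register argument order varies between target architectures,
 * so each branch permutes the raw syscall arguments into the single order
 * that do_fork() expects (the default case shows that order: arg1..arg5). */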
6959 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6960 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6961 #elif defined(TARGET_CRIS)
6962 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6963 #elif defined(TARGET_MICROBLAZE)
6964 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6965 #elif defined(TARGET_S390X)
6966 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6967 #else
6968 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6969 #endif
6970 break;
6971 #ifdef __NR_exit_group
6972 /* new thread calls */
6973 case TARGET_NR_exit_group:
6974 #ifdef TARGET_GPROF
6975 _mcleanup();
6976 #endif
6977 gdb_exit(cpu_env, arg1);
6978 ret = get_errno(exit_group(arg1));
6979 break;
6980 #endif
6981 case TARGET_NR_setdomainname:
6982 if (!(p = lock_user_string(arg1)))
6983 goto efault;
6984 ret = get_errno(setdomainname(p, arg2));
6985 unlock_user(p, arg1, 0);
6986 break;
6987 case TARGET_NR_uname:
6988 /* no need to transcode because we use the linux syscall */
6989 {
6990 struct new_utsname * buf;
6991
6992 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6993 goto efault;
6994 ret = get_errno(sys_uname(buf));
6995 if (!is_error(ret)) {
6996 /* Overwrite the native machine name with whatever is being
6997 emulated. */
6998 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6999 /* Allow the user to override the reported release. */
7000 if (qemu_uname_release && *qemu_uname_release)
7001 strcpy (buf->release, qemu_uname_release);
7002 }
7003 unlock_user_struct(buf, arg1, 1);
7004 }
7005 break;
7006 #ifdef TARGET_I386
7007 case TARGET_NR_modify_ldt:
7008 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7009 break;
7010 #if !defined(TARGET_X86_64)
7011 case TARGET_NR_vm86old:
7012 goto unimplemented;
7013 case TARGET_NR_vm86:
7014 ret = do_vm86(cpu_env, arg1, arg2);
7015 break;
7016 #endif
7017 #endif
7018 case TARGET_NR_adjtimex:
7019 goto unimplemented;
7020 #ifdef TARGET_NR_create_module
7021 case TARGET_NR_create_module:
7022 #endif
7023 case TARGET_NR_init_module:
7024 case TARGET_NR_delete_module:
7025 #ifdef TARGET_NR_get_kernel_syms
7026 case TARGET_NR_get_kernel_syms:
7027 #endif
7028 goto unimplemented;
7029 case TARGET_NR_quotactl:
7030 goto unimplemented;
7031 case TARGET_NR_getpgid:
7032 ret = get_errno(getpgid(arg1));
7033 break;
7034 case TARGET_NR_fchdir:
7035 ret = get_errno(fchdir(arg1));
7036 break;
7037 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7038 case TARGET_NR_bdflush:
7039 goto unimplemented;
7040 #endif
7041 #ifdef TARGET_NR_sysfs
7042 case TARGET_NR_sysfs:
7043 goto unimplemented;
7044 #endif
7045 case TARGET_NR_personality:
7046 ret = get_errno(personality(arg1));
7047 break;
7048 #ifdef TARGET_NR_afs_syscall
7049 case TARGET_NR_afs_syscall:
7050 goto unimplemented;
7051 #endif
7052 #ifdef TARGET_NR__llseek /* Not on alpha */
7053 case TARGET_NR__llseek:
7054 {
7055 int64_t res;
7056 #if !defined(__NR_llseek)
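/* No host llseek (typically a 64-bit host): assemble the 64-bit offset from
 * its two 32-bit halves and use plain lseek() instead. */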
7057 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7058 if (res == -1) {
7059 ret = get_errno(res);
7060 } else {
7061 ret = 0;
7062 }
7063 #else
7064 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7065 #endif
7066 if ((ret == 0) && put_user_s64(res, arg4)) {
7067 goto efault;
7068 }
7069 }
7070 break;
7071 #endif
7072 case TARGET_NR_getdents:
7073 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
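/* The host dirent layout is wider than the target's here, so each record is
 * repacked: d_ino/d_off are narrowed, d_reclen recomputed and the name
 * copied, which never enlarges the converted buffer. */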
7074 {
7075 struct target_dirent *target_dirp;
7076 struct linux_dirent *dirp;
7077 abi_long count = arg3;
7078
7079 dirp = malloc(count);
7080 if (!dirp) {
7081 ret = -TARGET_ENOMEM;
7082 goto fail;
7083 }
7084
7085 ret = get_errno(sys_getdents(arg1, dirp, count));
7086 if (!is_error(ret)) {
7087 struct linux_dirent *de;
7088 struct target_dirent *tde;
7089 int len = ret;
7090 int reclen, treclen;
7091 int count1, tnamelen;
7092
7093 count1 = 0;
7094 de = dirp;
7095 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7096 goto efault;
7097 tde = target_dirp;
7098 while (len > 0) {
7099 reclen = de->d_reclen;
7100 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7101 assert(tnamelen >= 0);
7102 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7103 assert(count1 + treclen <= count);
7104 tde->d_reclen = tswap16(treclen);
7105 tde->d_ino = tswapal(de->d_ino);
7106 tde->d_off = tswapal(de->d_off);
7107 memcpy(tde->d_name, de->d_name, tnamelen);
7108 de = (struct linux_dirent *)((char *)de + reclen);
7109 len -= reclen;
7110 tde = (struct target_dirent *)((char *)tde + treclen);
7111 count1 += treclen;
7112 }
7113 ret = count1;
7114 unlock_user(target_dirp, arg2, ret);
7115 }
7116 free(dirp);
7117 }
7118 #else
7119 {
7120 struct linux_dirent *dirp;
7121 abi_long count = arg3;
7122
7123 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7124 goto efault;
7125 ret = get_errno(sys_getdents(arg1, dirp, count));
7126 if (!is_error(ret)) {
7127 struct linux_dirent *de;
7128 int len = ret;
7129 int reclen;
7130 de = dirp;
7131 while (len > 0) {
7132 reclen = de->d_reclen;
7133 if (reclen > len)
7134 break;
7135 de->d_reclen = tswap16(reclen);
7136 tswapls(&de->d_ino);
7137 tswapls(&de->d_off);
7138 de = (struct linux_dirent *)((char *)de + reclen);
7139 len -= reclen;
7140 }
7141 }
7142 unlock_user(dirp, arg2, ret);
7143 }
7144 #endif
7145 break;
7146 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7147 case TARGET_NR_getdents64:
7148 {
7149 struct linux_dirent64 *dirp;
7150 abi_long count = arg3;
7151 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7152 goto efault;
7153 ret = get_errno(sys_getdents64(arg1, dirp, count));
7154 if (!is_error(ret)) {
7155 struct linux_dirent64 *de;
7156 int len = ret;
7157 int reclen;
7158 de = dirp;
7159 while (len > 0) {
7160 reclen = de->d_reclen;
7161 if (reclen > len)
7162 break;
7163 de->d_reclen = tswap16(reclen);
7164 tswap64s((uint64_t *)&de->d_ino);
7165 tswap64s((uint64_t *)&de->d_off);
7166 de = (struct linux_dirent64 *)((char *)de + reclen);
7167 len -= reclen;
7168 }
7169 }
7170 unlock_user(dirp, arg2, ret);
7171 }
7172 break;
7173 #endif /* TARGET_NR_getdents64 */
7174 #if defined(TARGET_NR__newselect)
7175 case TARGET_NR__newselect:
7176 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7177 break;
7178 #endif
7179 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7180 # ifdef TARGET_NR_poll
7181 case TARGET_NR_poll:
7182 # endif
7183 # ifdef TARGET_NR_ppoll
7184 case TARGET_NR_ppoll:
7185 # endif
7186 {
7187 struct target_pollfd *target_pfd;
7188 unsigned int nfds = arg2;
7189 int timeout = arg3;
7190 struct pollfd *pfd;
7191 unsigned int i;
7192
7193 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7194 if (!target_pfd)
7195 goto efault;
7196
7197 pfd = alloca(sizeof(struct pollfd) * nfds);
7198 for(i = 0; i < nfds; i++) {
7199 pfd[i].fd = tswap32(target_pfd[i].fd);
7200 pfd[i].events = tswap16(target_pfd[i].events);
7201 }
7202
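/* For ppoll a timespec timeout is used and an optional signal mask is
 * installed for the duration of the call; on success the (possibly updated)
 * timeout is copied back to the guest. */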
7203 # ifdef TARGET_NR_ppoll
7204 if (num == TARGET_NR_ppoll) {
7205 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7206 target_sigset_t *target_set;
7207 sigset_t _set, *set = &_set;
7208
7209 if (arg3) {
7210 if (target_to_host_timespec(timeout_ts, arg3)) {
7211 unlock_user(target_pfd, arg1, 0);
7212 goto efault;
7213 }
7214 } else {
7215 timeout_ts = NULL;
7216 }
7217
7218 if (arg4) {
7219 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7220 if (!target_set) {
7221 unlock_user(target_pfd, arg1, 0);
7222 goto efault;
7223 }
7224 target_to_host_sigset(set, target_set);
7225 } else {
7226 set = NULL;
7227 }
7228
7229 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7230
7231 if (!is_error(ret) && arg3) {
7232 host_to_target_timespec(arg3, timeout_ts);
7233 }
7234 if (arg4) {
7235 unlock_user(target_set, arg4, 0);
7236 }
7237 } else
7238 # endif
7239 ret = get_errno(poll(pfd, nfds, timeout));
7240
7241 if (!is_error(ret)) {
7242 for(i = 0; i < nfds; i++) {
7243 target_pfd[i].revents = tswap16(pfd[i].revents);
7244 }
7245 }
7246 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7247 }
7248 break;
7249 #endif
7250 case TARGET_NR_flock:
7251 /* NOTE: the flock constant seems to be the same for every
7252 Linux platform */
7253 ret = get_errno(flock(arg1, arg2));
7254 break;
7255 case TARGET_NR_readv:
7256 {
7257 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7258 if (vec != NULL) {
7259 ret = get_errno(readv(arg1, vec, arg3));
7260 unlock_iovec(vec, arg2, arg3, 1);
7261 } else {
7262 ret = -host_to_target_errno(errno);
7263 }
7264 }
7265 break;
7266 case TARGET_NR_writev:
7267 {
7268 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7269 if (vec != NULL) {
7270 ret = get_errno(writev(arg1, vec, arg3));
7271 unlock_iovec(vec, arg2, arg3, 0);
7272 } else {
7273 ret = -host_to_target_errno(errno);
7274 }
7275 }
7276 break;
7277 case TARGET_NR_getsid:
7278 ret = get_errno(getsid(arg1));
7279 break;
7280 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7281 case TARGET_NR_fdatasync:
7282 ret = get_errno(fdatasync(arg1));
7283 break;
7284 #endif
7285 case TARGET_NR__sysctl:
7286 /* We don't implement this, but ENOTDIR is always a safe
7287 return value. */
7288 ret = -TARGET_ENOTDIR;
7289 break;
7290 case TARGET_NR_sched_getaffinity:
7291 {
7292 unsigned int mask_size;
7293 unsigned long *mask;
7294
7295 /*
7296 * sched_getaffinity needs the mask length in multiples of ulong, so we
7297 * must handle mismatches between the target and host ulong sizes.
7298 */
7299 if (arg2 & (sizeof(abi_ulong) - 1)) {
7300 ret = -TARGET_EINVAL;
7301 break;
7302 }
7303 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
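/* e.g. a 4-byte target mask on a host with 8-byte longs is rounded up to
 * mask_size == 8 so the host syscall always sees whole unsigned longs. */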
7304
7305 mask = alloca(mask_size);
7306 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7307
7308 if (!is_error(ret)) {
7309 if (copy_to_user(arg3, mask, ret)) {
7310 goto efault;
7311 }
7312 }
7313 }
7314 break;
7315 case TARGET_NR_sched_setaffinity:
7316 {
7317 unsigned int mask_size;
7318 unsigned long *mask;
7319
7320 /*
7321 * sched_setaffinity needs the mask length in multiples of ulong, so we
7322 * must handle mismatches between the target and host ulong sizes.
7323 */
7324 if (arg2 & (sizeof(abi_ulong) - 1)) {
7325 ret = -TARGET_EINVAL;
7326 break;
7327 }
7328 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7329
7330 mask = alloca(mask_size);
7331 if (!(p = lock_user(VERIFY_READ, arg3, arg2, 1))) {
7332 goto efault;
7333 }
7334 memcpy(mask, p, arg2);
7335 unlock_user(p, arg3, 0);
7336
7337 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7338 }
7339 break;
7340 case TARGET_NR_sched_setparam:
7341 {
7342 struct sched_param *target_schp;
7343 struct sched_param schp;
7344
7345 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7346 goto efault;
7347 schp.sched_priority = tswap32(target_schp->sched_priority);
7348 unlock_user_struct(target_schp, arg2, 0);
7349 ret = get_errno(sched_setparam(arg1, &schp));
7350 }
7351 break;
7352 case TARGET_NR_sched_getparam:
7353 {
7354 struct sched_param *target_schp;
7355 struct sched_param schp;
7356 ret = get_errno(sched_getparam(arg1, &schp));
7357 if (!is_error(ret)) {
7358 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7359 goto efault;
7360 target_schp->sched_priority = tswap32(schp.sched_priority);
7361 unlock_user_struct(target_schp, arg2, 1);
7362 }
7363 }
7364 break;
7365 case TARGET_NR_sched_setscheduler:
7366 {
7367 struct sched_param *target_schp;
7368 struct sched_param schp;
7369 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7370 goto efault;
7371 schp.sched_priority = tswap32(target_schp->sched_priority);
7372 unlock_user_struct(target_schp, arg3, 0);
7373 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7374 }
7375 break;
7376 case TARGET_NR_sched_getscheduler:
7377 ret = get_errno(sched_getscheduler(arg1));
7378 break;
7379 case TARGET_NR_sched_yield:
7380 ret = get_errno(sched_yield());
7381 break;
7382 case TARGET_NR_sched_get_priority_max:
7383 ret = get_errno(sched_get_priority_max(arg1));
7384 break;
7385 case TARGET_NR_sched_get_priority_min:
7386 ret = get_errno(sched_get_priority_min(arg1));
7387 break;
7388 case TARGET_NR_sched_rr_get_interval:
7389 {
7390 struct timespec ts;
7391 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7392 if (!is_error(ret)) {
7393 host_to_target_timespec(arg2, &ts);
7394 }
7395 }
7396 break;
7397 case TARGET_NR_nanosleep:
7398 {
7399 struct timespec req, rem;
7400 target_to_host_timespec(&req, arg1);
7401 ret = get_errno(nanosleep(&req, &rem));
7402 if (is_error(ret) && arg2) {
7403 host_to_target_timespec(arg2, &rem);
7404 }
7405 }
7406 break;
7407 #ifdef TARGET_NR_query_module
7408 case TARGET_NR_query_module:
7409 goto unimplemented;
7410 #endif
7411 #ifdef TARGET_NR_nfsservctl
7412 case TARGET_NR_nfsservctl:
7413 goto unimplemented;
7414 #endif
7415 case TARGET_NR_prctl:
7416 switch (arg1) {
7417 case PR_GET_PDEATHSIG:
7418 {
7419 int deathsig;
7420 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7421 if (!is_error(ret) && arg2
7422 && put_user_ual(deathsig, arg2)) {
7423 goto efault;
7424 }
7425 break;
7426 }
7427 #ifdef PR_GET_NAME
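/* The task name buffer is 16 bytes (the kernel's TASK_COMM_LEN), hence the
 * fixed-size lock/unlock of guest memory below. */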
7428 case PR_GET_NAME:
7429 {
7430 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7431 if (!name) {
7432 goto efault;
7433 }
7434 ret = get_errno(prctl(arg1, (unsigned long)name,
7435 arg3, arg4, arg5));
7436 unlock_user(name, arg2, 16);
7437 break;
7438 }
7439 case PR_SET_NAME:
7440 {
7441 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7442 if (!name) {
7443 goto efault;
7444 }
7445 ret = get_errno(prctl(arg1, (unsigned long)name,
7446 arg3, arg4, arg5));
7447 unlock_user(name, arg2, 0);
7448 break;
7449 }
7450 #endif
7451 default:
7452 /* Most prctl options have no pointer arguments */
7453 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7454 break;
7455 }
7456 break;
7457 #ifdef TARGET_NR_arch_prctl
7458 case TARGET_NR_arch_prctl:
7459 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7460 ret = do_arch_prctl(cpu_env, arg1, arg2);
7461 break;
7462 #else
7463 goto unimplemented;
7464 #endif
7465 #endif
7466 #ifdef TARGET_NR_pread64
7467 case TARGET_NR_pread64:
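/* On ABIs where 64-bit syscall arguments must start on an even register, a
 * padding slot shifts the two offset halves up by one; regpairs_aligned()
 * detects this and the args are re-slotted before assembling the offset. */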
7468 if (regpairs_aligned(cpu_env)) {
7469 arg4 = arg5;
7470 arg5 = arg6;
7471 }
7472 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7473 goto efault;
7474 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7475 unlock_user(p, arg2, ret);
7476 break;
7477 case TARGET_NR_pwrite64:
7478 if (regpairs_aligned(cpu_env)) {
7479 arg4 = arg5;
7480 arg5 = arg6;
7481 }
7482 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7483 goto efault;
7484 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7485 unlock_user(p, arg2, 0);
7486 break;
7487 #endif
7488 case TARGET_NR_getcwd:
7489 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7490 goto efault;
7491 ret = get_errno(sys_getcwd1(p, arg2));
7492 unlock_user(p, arg1, ret);
7493 break;
7494 case TARGET_NR_capget:
7495 goto unimplemented;
7496 case TARGET_NR_capset:
7497 goto unimplemented;
7498 case TARGET_NR_sigaltstack:
7499 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7500 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7501 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7502 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7503 break;
7504 #else
7505 goto unimplemented;
7506 #endif
7507 case TARGET_NR_sendfile:
7508 goto unimplemented;
7509 #ifdef TARGET_NR_getpmsg
7510 case TARGET_NR_getpmsg:
7511 goto unimplemented;
7512 #endif
7513 #ifdef TARGET_NR_putpmsg
7514 case TARGET_NR_putpmsg:
7515 goto unimplemented;
7516 #endif
7517 #ifdef TARGET_NR_vfork
7518 case TARGET_NR_vfork:
7519 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7520 0, 0, 0, 0));
7521 break;
7522 #endif
7523 #ifdef TARGET_NR_ugetrlimit
7524 case TARGET_NR_ugetrlimit:
7525 {
7526 struct rlimit rlim;
7527 int resource = target_to_host_resource(arg1);
7528 ret = get_errno(getrlimit(resource, &rlim));
7529 if (!is_error(ret)) {
7530 struct target_rlimit *target_rlim;
7531 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7532 goto efault;
7533 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7534 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7535 unlock_user_struct(target_rlim, arg2, 1);
7536 }
7537 break;
7538 }
7539 #endif
7540 #ifdef TARGET_NR_truncate64
7541 case TARGET_NR_truncate64:
7542 if (!(p = lock_user_string(arg1)))
7543 goto efault;
7544 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7545 unlock_user(p, arg1, 0);
7546 break;
7547 #endif
7548 #ifdef TARGET_NR_ftruncate64
7549 case TARGET_NR_ftruncate64:
7550 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7551 break;
7552 #endif
7553 #ifdef TARGET_NR_stat64
7554 case TARGET_NR_stat64:
7555 if (!(p = lock_user_string(arg1)))
7556 goto efault;
7557 ret = get_errno(stat(path(p), &st));
7558 unlock_user(p, arg1, 0);
7559 if (!is_error(ret))
7560 ret = host_to_target_stat64(cpu_env, arg2, &st);
7561 break;
7562 #endif
7563 #ifdef TARGET_NR_lstat64
7564 case TARGET_NR_lstat64:
7565 if (!(p = lock_user_string(arg1)))
7566 goto efault;
7567 ret = get_errno(lstat(path(p), &st));
7568 unlock_user(p, arg1, 0);
7569 if (!is_error(ret))
7570 ret = host_to_target_stat64(cpu_env, arg2, &st);
7571 break;
7572 #endif
7573 #ifdef TARGET_NR_fstat64
7574 case TARGET_NR_fstat64:
7575 ret = get_errno(fstat(arg1, &st));
7576 if (!is_error(ret))
7577 ret = host_to_target_stat64(cpu_env, arg2, &st);
7578 break;
7579 #endif
7580 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7581 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7582 #ifdef TARGET_NR_fstatat64
7583 case TARGET_NR_fstatat64:
7584 #endif
7585 #ifdef TARGET_NR_newfstatat
7586 case TARGET_NR_newfstatat:
7587 #endif
7588 if (!(p = lock_user_string(arg2)))
7589 goto efault;
7590 #ifdef __NR_fstatat64
7591 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7592 #else
7593 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7594 #endif
7595 if (!is_error(ret))
7596 ret = host_to_target_stat64(cpu_env, arg3, &st);
7597 break;
7598 #endif
7599 case TARGET_NR_lchown:
7600 if (!(p = lock_user_string(arg1)))
7601 goto efault;
7602 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7603 unlock_user(p, arg1, 0);
7604 break;
7605 #ifdef TARGET_NR_getuid
7606 case TARGET_NR_getuid:
7607 ret = get_errno(high2lowuid(getuid()));
7608 break;
7609 #endif
7610 #ifdef TARGET_NR_getgid
7611 case TARGET_NR_getgid:
7612 ret = get_errno(high2lowgid(getgid()));
7613 break;
7614 #endif
7615 #ifdef TARGET_NR_geteuid
7616 case TARGET_NR_geteuid:
7617 ret = get_errno(high2lowuid(geteuid()));
7618 break;
7619 #endif
7620 #ifdef TARGET_NR_getegid
7621 case TARGET_NR_getegid:
7622 ret = get_errno(high2lowgid(getegid()));
7623 break;
7624 #endif
7625 case TARGET_NR_setreuid:
7626 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7627 break;
7628 case TARGET_NR_setregid:
7629 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7630 break;
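/* getgroups/setgroups use the legacy group-ID ABI here: the guest array
 * holds target_id entries (apparently 16-bit, hence the gidsetsize * 2 bytes
 * locked), converted via high2lowgid/low2highgid. */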
7631 case TARGET_NR_getgroups:
7632 {
7633 int gidsetsize = arg1;
7634 target_id *target_grouplist;
7635 gid_t *grouplist;
7636 int i;
7637
7638 grouplist = alloca(gidsetsize * sizeof(gid_t));
7639 ret = get_errno(getgroups(gidsetsize, grouplist));
7640 if (gidsetsize == 0)
7641 break;
7642 if (!is_error(ret)) {
7643 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7644 if (!target_grouplist)
7645 goto efault;
7646 for(i = 0;i < ret; i++)
7647 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7648 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7649 }
7650 }
7651 break;
7652 case TARGET_NR_setgroups:
7653 {
7654 int gidsetsize = arg1;
7655 target_id *target_grouplist;
7656 gid_t *grouplist;
7657 int i;
7658
7659 grouplist = alloca(gidsetsize * sizeof(gid_t));
7660 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7661 if (!target_grouplist) {
7662 ret = -TARGET_EFAULT;
7663 goto fail;
7664 }
7665 for(i = 0;i < gidsetsize; i++)
7666 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7667 unlock_user(target_grouplist, arg2, 0);
7668 ret = get_errno(setgroups(gidsetsize, grouplist));
7669 }
7670 break;
7671 case TARGET_NR_fchown:
7672 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7673 break;
7674 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7675 case TARGET_NR_fchownat:
7676 if (!(p = lock_user_string(arg2)))
7677 goto efault;
7678 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7679 unlock_user(p, arg2, 0);
7680 break;
7681 #endif
7682 #ifdef TARGET_NR_setresuid
7683 case TARGET_NR_setresuid:
7684 ret = get_errno(setresuid(low2highuid(arg1),
7685 low2highuid(arg2),
7686 low2highuid(arg3)));
7687 break;
7688 #endif
7689 #ifdef TARGET_NR_getresuid
7690 case TARGET_NR_getresuid:
7691 {
7692 uid_t ruid, euid, suid;
7693 ret = get_errno(getresuid(&ruid, &euid, &suid));
7694 if (!is_error(ret)) {
7695 if (put_user_u16(high2lowuid(ruid), arg1)
7696 || put_user_u16(high2lowuid(euid), arg2)
7697 || put_user_u16(high2lowuid(suid), arg3))
7698 goto efault;
7699 }
7700 }
7701 break;
7702 #endif
7703 #ifdef TARGET_NR_getresgid
7704 case TARGET_NR_setresgid:
7705 ret = get_errno(setresgid(low2highgid(arg1),
7706 low2highgid(arg2),
7707 low2highgid(arg3)));
7708 break;
7709 #endif
7710 #ifdef TARGET_NR_getresgid
7711 case TARGET_NR_getresgid:
7712 {
7713 gid_t rgid, egid, sgid;
7714 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7715 if (!is_error(ret)) {
7716 if (put_user_u16(high2lowgid(rgid), arg1)
7717 || put_user_u16(high2lowgid(egid), arg2)
7718 || put_user_u16(high2lowgid(sgid), arg3))
7719 goto efault;
7720 }
7721 }
7722 break;
7723 #endif
7724 case TARGET_NR_chown:
7725 if (!(p = lock_user_string(arg1)))
7726 goto efault;
7727 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7728 unlock_user(p, arg1, 0);
7729 break;
7730 case TARGET_NR_setuid:
7731 ret = get_errno(setuid(low2highuid(arg1)));
7732 break;
7733 case TARGET_NR_setgid:
7734 ret = get_errno(setgid(low2highgid(arg1)));
7735 break;
7736 case TARGET_NR_setfsuid:
7737 ret = get_errno(setfsuid(arg1));
7738 break;
7739 case TARGET_NR_setfsgid:
7740 ret = get_errno(setfsgid(arg1));
7741 break;
7742
7743 #ifdef TARGET_NR_lchown32
7744 case TARGET_NR_lchown32:
7745 if (!(p = lock_user_string(arg1)))
7746 goto efault;
7747 ret = get_errno(lchown(p, arg2, arg3));
7748 unlock_user(p, arg1, 0);
7749 break;
7750 #endif
7751 #ifdef TARGET_NR_getuid32
7752 case TARGET_NR_getuid32:
7753 ret = get_errno(getuid());
7754 break;
7755 #endif
7756
7757 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7758 /* Alpha specific */
7759 case TARGET_NR_getxuid:
7760 {
7761 uid_t euid;
7762 euid=geteuid();
7763 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7764 }
7765 ret = get_errno(getuid());
7766 break;
7767 #endif
7768 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7769 /* Alpha specific */
7770 case TARGET_NR_getxgid:
7771 {
7772 gid_t egid;
7773 egid=getegid();
7774 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7775 }
7776 ret = get_errno(getgid());
7777 break;
7778 #endif
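/*
 * The osf_{get,set}sysinfo cases below emulate Tru64/OSF-style access
 * to the Alpha floating point control state.  Userspace deals in a
 * "software completion" register (SWCR) layout, while the CPU state
 * QEMU keeps is the hardware FPCR, so each direction shifts the
 * status, trap-enable and mapping bits between the two layouts, e.g.
 * (sketch of the status bits, following the masks used below):
 *
 *   swcr_status = (fpcr >> 35) & SWCR_STATUS_MASK;   // read direction
 *   fpcr_status = (swcr & SWCR_STATUS_MASK) << 35;   // write direction
 *
 * The trap-enable bits are stored inverted in the FPCR (a set FPCR bit
 * disables the trap), hence the ~fpcr / ~swcr terms.
 */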
7779 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7780 /* Alpha specific */
7781 case TARGET_NR_osf_getsysinfo:
7782 ret = -TARGET_EOPNOTSUPP;
7783 switch (arg1) {
7784 case TARGET_GSI_IEEE_FP_CONTROL:
7785 {
7786 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7787
7788 /* Copied from linux ieee_fpcr_to_swcr. */
7789 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7790 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7791 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7792 | SWCR_TRAP_ENABLE_DZE
7793 | SWCR_TRAP_ENABLE_OVF);
7794 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7795 | SWCR_TRAP_ENABLE_INE);
7796 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7797 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7798
7799 if (put_user_u64 (swcr, arg2))
7800 goto efault;
7801 ret = 0;
7802 }
7803 break;
7804
7805 /* case GSI_IEEE_STATE_AT_SIGNAL:
7806 -- Not implemented in linux kernel.
7807 case GSI_UACPROC:
7808 -- Retrieves current unaligned access state; not much used.
7809 case GSI_PROC_TYPE:
7810 -- Retrieves implver information; surely not used.
7811 case GSI_GET_HWRPB:
7812 -- Grabs a copy of the HWRPB; surely not used.
7813 */
7814 }
7815 break;
7816 #endif
7817 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7818 /* Alpha specific */
7819 case TARGET_NR_osf_setsysinfo:
7820 ret = -TARGET_EOPNOTSUPP;
7821 switch (arg1) {
7822 case TARGET_SSI_IEEE_FP_CONTROL:
7823 {
7824 uint64_t swcr, fpcr, orig_fpcr;
7825
7826 if (get_user_u64 (swcr, arg2)) {
7827 goto efault;
7828 }
7829 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7830 fpcr = orig_fpcr & FPCR_DYN_MASK;
7831
7832 /* Copied from linux ieee_swcr_to_fpcr. */
7833 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7834 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7835 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7836 | SWCR_TRAP_ENABLE_DZE
7837 | SWCR_TRAP_ENABLE_OVF)) << 48;
7838 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7839 | SWCR_TRAP_ENABLE_INE)) << 57;
7840 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7841 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7842
7843 cpu_alpha_store_fpcr(cpu_env, fpcr);
7844 ret = 0;
7845 }
7846 break;
7847
7848 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7849 {
7850 uint64_t exc, fpcr, orig_fpcr;
7851 int si_code;
7852
7853 if (get_user_u64(exc, arg2)) {
7854 goto efault;
7855 }
7856
7857 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7858
7859 /* We only add to the exception status here. */
7860 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7861
7862 cpu_alpha_store_fpcr(cpu_env, fpcr);
7863 ret = 0;
7864
7865 /* Old exceptions are not signaled. */
7866 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7867
7868 /* If any exceptions were set by this call
7869 and are unmasked, send a signal. */
7870 si_code = 0;
7871 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7872 si_code = TARGET_FPE_FLTRES;
7873 }
7874 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7875 si_code = TARGET_FPE_FLTUND;
7876 }
7877 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7878 si_code = TARGET_FPE_FLTOVF;
7879 }
7880 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7881 si_code = TARGET_FPE_FLTDIV;
7882 }
7883 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7884 si_code = TARGET_FPE_FLTINV;
7885 }
7886 if (si_code != 0) {
7887 target_siginfo_t info;
7888 info.si_signo = SIGFPE;
7889 info.si_errno = 0;
7890 info.si_code = si_code;
7891 info._sifields._sigfault._addr
7892 = ((CPUArchState *)cpu_env)->pc;
7893 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7894 }
7895 }
7896 break;
7897
7898 /* case SSI_NVPAIRS:
7899 -- Used with SSIN_UACPROC to enable unaligned accesses.
7900 case SSI_IEEE_STATE_AT_SIGNAL:
7901 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7902 -- Not implemented in linux kernel
7903 */
7904 }
7905 break;
7906 #endif
7907 #ifdef TARGET_NR_osf_sigprocmask
7908 /* Alpha specific. */
7909 case TARGET_NR_osf_sigprocmask:
7910 {
7911 abi_ulong mask;
7912 int how;
7913 sigset_t set, oldset;
7914
7915 switch(arg1) {
7916 case TARGET_SIG_BLOCK:
7917 how = SIG_BLOCK;
7918 break;
7919 case TARGET_SIG_UNBLOCK:
7920 how = SIG_UNBLOCK;
7921 break;
7922 case TARGET_SIG_SETMASK:
7923 how = SIG_SETMASK;
7924 break;
7925 default:
7926 ret = -TARGET_EINVAL;
7927 goto fail;
7928 }
7929 mask = arg2;
7930 target_to_host_old_sigset(&set, &mask);
7931 sigprocmask(how, &set, &oldset);
7932 host_to_target_old_sigset(&mask, &oldset);
7933 ret = mask;
7934 }
7935 break;
7936 #endif
7937
7938 #ifdef TARGET_NR_getgid32
7939 case TARGET_NR_getgid32:
7940 ret = get_errno(getgid());
7941 break;
7942 #endif
7943 #ifdef TARGET_NR_geteuid32
7944 case TARGET_NR_geteuid32:
7945 ret = get_errno(geteuid());
7946 break;
7947 #endif
7948 #ifdef TARGET_NR_getegid32
7949 case TARGET_NR_getegid32:
7950 ret = get_errno(getegid());
7951 break;
7952 #endif
7953 #ifdef TARGET_NR_setreuid32
7954 case TARGET_NR_setreuid32:
7955 ret = get_errno(setreuid(arg1, arg2));
7956 break;
7957 #endif
7958 #ifdef TARGET_NR_setregid32
7959 case TARGET_NR_setregid32:
7960 ret = get_errno(setregid(arg1, arg2));
7961 break;
7962 #endif
7963 #ifdef TARGET_NR_getgroups32
7964 case TARGET_NR_getgroups32:
7965 {
7966 int gidsetsize = arg1;
7967 uint32_t *target_grouplist;
7968 gid_t *grouplist;
7969 int i;
7970
7971 grouplist = alloca(gidsetsize * sizeof(gid_t));
7972 ret = get_errno(getgroups(gidsetsize, grouplist));
7973 if (gidsetsize == 0)
7974 break;
7975 if (!is_error(ret)) {
7976 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7977 if (!target_grouplist) {
7978 ret = -TARGET_EFAULT;
7979 goto fail;
7980 }
7981 for(i = 0;i < ret; i++)
7982 target_grouplist[i] = tswap32(grouplist[i]);
7983 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7984 }
7985 }
7986 break;
7987 #endif
7988 #ifdef TARGET_NR_setgroups32
7989 case TARGET_NR_setgroups32:
7990 {
7991 int gidsetsize = arg1;
7992 uint32_t *target_grouplist;
7993 gid_t *grouplist;
7994 int i;
7995
7996 grouplist = alloca(gidsetsize * sizeof(gid_t));
7997 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7998 if (!target_grouplist) {
7999 ret = -TARGET_EFAULT;
8000 goto fail;
8001 }
8002 for(i = 0;i < gidsetsize; i++)
8003 grouplist[i] = tswap32(target_grouplist[i]);
8004 unlock_user(target_grouplist, arg2, 0);
8005 ret = get_errno(setgroups(gidsetsize, grouplist));
8006 }
8007 break;
8008 #endif
8009 #ifdef TARGET_NR_fchown32
8010 case TARGET_NR_fchown32:
8011 ret = get_errno(fchown(arg1, arg2, arg3));
8012 break;
8013 #endif
8014 #ifdef TARGET_NR_setresuid32
8015 case TARGET_NR_setresuid32:
8016 ret = get_errno(setresuid(arg1, arg2, arg3));
8017 break;
8018 #endif
8019 #ifdef TARGET_NR_getresuid32
8020 case TARGET_NR_getresuid32:
8021 {
8022 uid_t ruid, euid, suid;
8023 ret = get_errno(getresuid(&ruid, &euid, &suid));
8024 if (!is_error(ret)) {
8025 if (put_user_u32(ruid, arg1)
8026 || put_user_u32(euid, arg2)
8027 || put_user_u32(suid, arg3))
8028 goto efault;
8029 }
8030 }
8031 break;
8032 #endif
8033 #ifdef TARGET_NR_setresgid32
8034 case TARGET_NR_setresgid32:
8035 ret = get_errno(setresgid(arg1, arg2, arg3));
8036 break;
8037 #endif
8038 #ifdef TARGET_NR_getresgid32
8039 case TARGET_NR_getresgid32:
8040 {
8041 gid_t rgid, egid, sgid;
8042 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8043 if (!is_error(ret)) {
8044 if (put_user_u32(rgid, arg1)
8045 || put_user_u32(egid, arg2)
8046 || put_user_u32(sgid, arg3))
8047 goto efault;
8048 }
8049 }
8050 break;
8051 #endif
8052 #ifdef TARGET_NR_chown32
8053 case TARGET_NR_chown32:
8054 if (!(p = lock_user_string(arg1)))
8055 goto efault;
8056 ret = get_errno(chown(p, arg2, arg3));
8057 unlock_user(p, arg1, 0);
8058 break;
8059 #endif
8060 #ifdef TARGET_NR_setuid32
8061 case TARGET_NR_setuid32:
8062 ret = get_errno(setuid(arg1));
8063 break;
8064 #endif
8065 #ifdef TARGET_NR_setgid32
8066 case TARGET_NR_setgid32:
8067 ret = get_errno(setgid(arg1));
8068 break;
8069 #endif
8070 #ifdef TARGET_NR_setfsuid32
8071 case TARGET_NR_setfsuid32:
8072 ret = get_errno(setfsuid(arg1));
8073 break;
8074 #endif
8075 #ifdef TARGET_NR_setfsgid32
8076 case TARGET_NR_setfsgid32:
8077 ret = get_errno(setfsgid(arg1));
8078 break;
8079 #endif
8080
8081 case TARGET_NR_pivot_root:
8082 goto unimplemented;
8083 #ifdef TARGET_NR_mincore
8084 case TARGET_NR_mincore:
8085 {
8086 void *a;
8087 ret = -TARGET_EFAULT;
8088 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8089 goto efault;
8090 if (!(p = lock_user_string(arg3)))
8091 goto mincore_fail;
8092 ret = get_errno(mincore(a, arg2, p));
8093 unlock_user(p, arg3, ret);
8094 mincore_fail:
8095 unlock_user(a, arg1, 0);
8096 }
8097 break;
8098 #endif
8099 #ifdef TARGET_NR_arm_fadvise64_64
8100 case TARGET_NR_arm_fadvise64_64:
8101 {
8102 /*
8103 * arm_fadvise64_64 looks like fadvise64_64 but
8104 * with different argument order
8105 */
8106 abi_long temp;
8107 temp = arg3;
8108 arg3 = arg4;
8109 arg4 = temp;
8110 }
8111 #endif
8112 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8113 #ifdef TARGET_NR_fadvise64_64
8114 case TARGET_NR_fadvise64_64:
8115 #endif
8116 #ifdef TARGET_NR_fadvise64
8117 case TARGET_NR_fadvise64:
8118 #endif
8119 #ifdef TARGET_S390X
8120 switch (arg4) {
8121 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8122 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8123 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8124 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8125 default: break;
8126 }
8127 #endif
8128 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
8129 break;
8130 #endif
8131 #ifdef TARGET_NR_madvise
8132 case TARGET_NR_madvise:
8133 /* A straight passthrough may not be safe because qemu sometimes
8134 turns private file-backed mappings into anonymous mappings.
8135 This will break MADV_DONTNEED.
8136 This is a hint, so ignoring and returning success is ok. */
8137 ret = get_errno(0);
8138 break;
8139 #endif
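/*
 * TARGET_NR_fcntl64: 32-bit guests pass a struct flock64 whose layout
 * differs from the host's, so F_GETLK64/F_SETLK64/F_SETLKW64 are
 * converted field by field in both directions.  The extra
 * target_eabi_flock64 path exists because the ARM EABI aligns 64-bit
 * members to 8 bytes, which inserts padding after l_type/l_whence and
 * shifts l_start/l_len relative to the old-ABI layout.  A minimal
 * sketch of the conversion pattern used below:
 *
 *   fl.l_type  = tswap16(target_fl->l_type);   // byte-swap each field
 *   fl.l_start = tswap64(target_fl->l_start);  // 64-bit offsets
 *   ret = get_errno(fcntl(fd, host_cmd, &fl)); // then call the host
 *
 * Commands other than the *LK64 ones fall through to do_fcntl().
 */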
8140 #if TARGET_ABI_BITS == 32
8141 case TARGET_NR_fcntl64:
8142 {
8143 int cmd;
8144 struct flock64 fl;
8145 struct target_flock64 *target_fl;
8146 #ifdef TARGET_ARM
8147 struct target_eabi_flock64 *target_efl;
8148 #endif
8149
8150 cmd = target_to_host_fcntl_cmd(arg2);
8151 if (cmd == -TARGET_EINVAL) {
8152 ret = cmd;
8153 break;
8154 }
8155
8156 switch(arg2) {
8157 case TARGET_F_GETLK64:
8158 #ifdef TARGET_ARM
8159 if (((CPUARMState *)cpu_env)->eabi) {
8160 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8161 goto efault;
8162 fl.l_type = tswap16(target_efl->l_type);
8163 fl.l_whence = tswap16(target_efl->l_whence);
8164 fl.l_start = tswap64(target_efl->l_start);
8165 fl.l_len = tswap64(target_efl->l_len);
8166 fl.l_pid = tswap32(target_efl->l_pid);
8167 unlock_user_struct(target_efl, arg3, 0);
8168 } else
8169 #endif
8170 {
8171 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8172 goto efault;
8173 fl.l_type = tswap16(target_fl->l_type);
8174 fl.l_whence = tswap16(target_fl->l_whence);
8175 fl.l_start = tswap64(target_fl->l_start);
8176 fl.l_len = tswap64(target_fl->l_len);
8177 fl.l_pid = tswap32(target_fl->l_pid);
8178 unlock_user_struct(target_fl, arg3, 0);
8179 }
8180 ret = get_errno(fcntl(arg1, cmd, &fl));
8181 if (ret == 0) {
8182 #ifdef TARGET_ARM
8183 if (((CPUARMState *)cpu_env)->eabi) {
8184 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8185 goto efault;
8186 target_efl->l_type = tswap16(fl.l_type);
8187 target_efl->l_whence = tswap16(fl.l_whence);
8188 target_efl->l_start = tswap64(fl.l_start);
8189 target_efl->l_len = tswap64(fl.l_len);
8190 target_efl->l_pid = tswap32(fl.l_pid);
8191 unlock_user_struct(target_efl, arg3, 1);
8192 } else
8193 #endif
8194 {
8195 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8196 goto efault;
8197 target_fl->l_type = tswap16(fl.l_type);
8198 target_fl->l_whence = tswap16(fl.l_whence);
8199 target_fl->l_start = tswap64(fl.l_start);
8200 target_fl->l_len = tswap64(fl.l_len);
8201 target_fl->l_pid = tswap32(fl.l_pid);
8202 unlock_user_struct(target_fl, arg3, 1);
8203 }
8204 }
8205 break;
8206
8207 case TARGET_F_SETLK64:
8208 case TARGET_F_SETLKW64:
8209 #ifdef TARGET_ARM
8210 if (((CPUARMState *)cpu_env)->eabi) {
8211 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8212 goto efault;
8213 fl.l_type = tswap16(target_efl->l_type);
8214 fl.l_whence = tswap16(target_efl->l_whence);
8215 fl.l_start = tswap64(target_efl->l_start);
8216 fl.l_len = tswap64(target_efl->l_len);
8217 fl.l_pid = tswap32(target_efl->l_pid);
8218 unlock_user_struct(target_efl, arg3, 0);
8219 } else
8220 #endif
8221 {
8222 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8223 goto efault;
8224 fl.l_type = tswap16(target_fl->l_type);
8225 fl.l_whence = tswap16(target_fl->l_whence);
8226 fl.l_start = tswap64(target_fl->l_start);
8227 fl.l_len = tswap64(target_fl->l_len);
8228 fl.l_pid = tswap32(target_fl->l_pid);
8229 unlock_user_struct(target_fl, arg3, 0);
8230 }
8231 ret = get_errno(fcntl(arg1, cmd, &fl));
8232 break;
8233 default:
8234 ret = do_fcntl(arg1, arg2, arg3);
8235 break;
8236 }
8237 break;
8238 }
8239 #endif
8240 #ifdef TARGET_NR_cacheflush
8241 case TARGET_NR_cacheflush:
8242 /* self-modifying code is handled automatically, so nothing needed */
8243 ret = 0;
8244 break;
8245 #endif
8246 #ifdef TARGET_NR_security
8247 case TARGET_NR_security:
8248 goto unimplemented;
8249 #endif
8250 #ifdef TARGET_NR_getpagesize
8251 case TARGET_NR_getpagesize:
8252 ret = TARGET_PAGE_SIZE;
8253 break;
8254 #endif
8255 case TARGET_NR_gettid:
8256 ret = get_errno(gettid());
8257 break;
8258 #ifdef TARGET_NR_readahead
8259 case TARGET_NR_readahead:
8260 #if TARGET_ABI_BITS == 32
8261 if (regpairs_aligned(cpu_env)) {
8262 arg2 = arg3;
8263 arg3 = arg4;
8264 arg4 = arg5;
8265 }
8266 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8267 #else
8268 ret = get_errno(readahead(arg1, arg2, arg3));
8269 #endif
8270 break;
8271 #endif
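/*
 * Extended attribute syscalls.  All of these follow the same pattern:
 * lock the guest name/value buffers, call the corresponding host xattr
 * function, and return -TARGET_EFAULT if any guest pointer is bad.
 * A zero buffer address is passed through as NULL so that the usual
 * size-probing idiom keeps working, e.g. (guest-side sketch):
 *
 *   ssize_t len = listxattr(path, NULL, 0);   // ask for required size
 *   char *buf = malloc(len);
 *   listxattr(path, buf, len);                // then fetch the names
 */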
8272 #ifdef CONFIG_ATTR
8273 #ifdef TARGET_NR_setxattr
8274 case TARGET_NR_listxattr:
8275 case TARGET_NR_llistxattr:
8276 {
8277 void *p, *b = 0;
8278 if (arg2) {
8279 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8280 if (!b) {
8281 ret = -TARGET_EFAULT;
8282 break;
8283 }
8284 }
8285 p = lock_user_string(arg1);
8286 if (p) {
8287 if (num == TARGET_NR_listxattr) {
8288 ret = get_errno(listxattr(p, b, arg3));
8289 } else {
8290 ret = get_errno(llistxattr(p, b, arg3));
8291 }
8292 } else {
8293 ret = -TARGET_EFAULT;
8294 }
8295 unlock_user(p, arg1, 0);
8296 unlock_user(b, arg2, arg3);
8297 break;
8298 }
8299 case TARGET_NR_flistxattr:
8300 {
8301 void *b = 0;
8302 if (arg2) {
8303 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8304 if (!b) {
8305 ret = -TARGET_EFAULT;
8306 break;
8307 }
8308 }
8309 ret = get_errno(flistxattr(arg1, b, arg3));
8310 unlock_user(b, arg2, arg3);
8311 break;
8312 }
8313 case TARGET_NR_setxattr:
8314 case TARGET_NR_lsetxattr:
8315 {
8316 void *p, *n, *v = 0;
8317 if (arg3) {
8318 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8319 if (!v) {
8320 ret = -TARGET_EFAULT;
8321 break;
8322 }
8323 }
8324 p = lock_user_string(arg1);
8325 n = lock_user_string(arg2);
8326 if (p && n) {
8327 if (num == TARGET_NR_setxattr) {
8328 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8329 } else {
8330 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8331 }
8332 } else {
8333 ret = -TARGET_EFAULT;
8334 }
8335 unlock_user(p, arg1, 0);
8336 unlock_user(n, arg2, 0);
8337 unlock_user(v, arg3, 0);
8338 }
8339 break;
8340 case TARGET_NR_fsetxattr:
8341 {
8342 void *n, *v = 0;
8343 if (arg3) {
8344 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8345 if (!v) {
8346 ret = -TARGET_EFAULT;
8347 break;
8348 }
8349 }
8350 n = lock_user_string(arg2);
8351 if (n) {
8352 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8353 } else {
8354 ret = -TARGET_EFAULT;
8355 }
8356 unlock_user(n, arg2, 0);
8357 unlock_user(v, arg3, 0);
8358 }
8359 break;
8360 case TARGET_NR_getxattr:
8361 case TARGET_NR_lgetxattr:
8362 {
8363 void *p, *n, *v = 0;
8364 if (arg3) {
8365 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8366 if (!v) {
8367 ret = -TARGET_EFAULT;
8368 break;
8369 }
8370 }
8371 p = lock_user_string(arg1);
8372 n = lock_user_string(arg2);
8373 if (p && n) {
8374 if (num == TARGET_NR_getxattr) {
8375 ret = get_errno(getxattr(p, n, v, arg4));
8376 } else {
8377 ret = get_errno(lgetxattr(p, n, v, arg4));
8378 }
8379 } else {
8380 ret = -TARGET_EFAULT;
8381 }
8382 unlock_user(p, arg1, 0);
8383 unlock_user(n, arg2, 0);
8384 unlock_user(v, arg3, arg4);
8385 }
8386 break;
8387 case TARGET_NR_fgetxattr:
8388 {
8389 void *n, *v = 0;
8390 if (arg3) {
8391 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8392 if (!v) {
8393 ret = -TARGET_EFAULT;
8394 break;
8395 }
8396 }
8397 n = lock_user_string(arg2);
8398 if (n) {
8399 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8400 } else {
8401 ret = -TARGET_EFAULT;
8402 }
8403 unlock_user(n, arg2, 0);
8404 unlock_user(v, arg3, arg4);
8405 }
8406 break;
8407 case TARGET_NR_removexattr:
8408 case TARGET_NR_lremovexattr:
8409 {
8410 void *p, *n;
8411 p = lock_user_string(arg1);
8412 n = lock_user_string(arg2);
8413 if (p && n) {
8414 if (num == TARGET_NR_removexattr) {
8415 ret = get_errno(removexattr(p, n));
8416 } else {
8417 ret = get_errno(lremovexattr(p, n));
8418 }
8419 } else {
8420 ret = -TARGET_EFAULT;
8421 }
8422 unlock_user(p, arg1, 0);
8423 unlock_user(n, arg2, 0);
8424 }
8425 break;
8426 case TARGET_NR_fremovexattr:
8427 {
8428 void *n;
8429 n = lock_user_string(arg2);
8430 if (n) {
8431 ret = get_errno(fremovexattr(arg1, n));
8432 } else {
8433 ret = -TARGET_EFAULT;
8434 }
8435 unlock_user(n, arg2, 0);
8436 }
8437 break;
8438 #endif
8439 #endif /* CONFIG_ATTR */
8440 #ifdef TARGET_NR_set_thread_area
8441 case TARGET_NR_set_thread_area:
8442 #if defined(TARGET_MIPS)
8443 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8444 ret = 0;
8445 break;
8446 #elif defined(TARGET_CRIS)
8447 if (arg1 & 0xff)
8448 ret = -TARGET_EINVAL;
8449 else {
8450 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8451 ret = 0;
8452 }
8453 break;
8454 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8455 ret = do_set_thread_area(cpu_env, arg1);
8456 break;
8457 #else
8458 goto unimplemented_nowarn;
8459 #endif
8460 #endif
8461 #ifdef TARGET_NR_get_thread_area
8462 case TARGET_NR_get_thread_area:
8463 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8464 ret = do_get_thread_area(cpu_env, arg1);
8465 #else
8466 goto unimplemented_nowarn;
8467 #endif
8468 #endif
8469 #ifdef TARGET_NR_getdomainname
8470 case TARGET_NR_getdomainname:
8471 goto unimplemented_nowarn;
8472 #endif
8473
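/*
 * POSIX clock syscalls.  struct timespec has the same meaning on host
 * and target but not necessarily the same field width or byte order,
 * so each case below converts it with target_to_host_timespec() /
 * host_to_target_timespec() around the host call, roughly:
 *
 *   struct timespec ts;
 *   ret = get_errno(clock_gettime(arg1, &ts));   // host syscall
 *   if (!is_error(ret))
 *       host_to_target_timespec(arg2, &ts);      // copy back to guest
 */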
8474 #ifdef TARGET_NR_clock_gettime
8475 case TARGET_NR_clock_gettime:
8476 {
8477 struct timespec ts;
8478 ret = get_errno(clock_gettime(arg1, &ts));
8479 if (!is_error(ret)) {
8480 host_to_target_timespec(arg2, &ts);
8481 }
8482 break;
8483 }
8484 #endif
8485 #ifdef TARGET_NR_clock_getres
8486 case TARGET_NR_clock_getres:
8487 {
8488 struct timespec ts;
8489 ret = get_errno(clock_getres(arg1, &ts));
8490 if (!is_error(ret)) {
8491 host_to_target_timespec(arg2, &ts);
8492 }
8493 break;
8494 }
8495 #endif
8496 #ifdef TARGET_NR_clock_nanosleep
8497 case TARGET_NR_clock_nanosleep:
8498 {
8499 struct timespec ts;
8500 target_to_host_timespec(&ts, arg3);
8501 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8502 if (arg4)
8503 host_to_target_timespec(arg4, &ts);
8504 break;
8505 }
8506 #endif
8507
8508 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8509 case TARGET_NR_set_tid_address:
8510 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8511 break;
8512 #endif
8513
8514 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8515 case TARGET_NR_tkill:
8516 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8517 break;
8518 #endif
8519
8520 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8521 case TARGET_NR_tgkill:
8522 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8523 target_to_host_signal(arg3)));
8524 break;
8525 #endif
8526
8527 #ifdef TARGET_NR_set_robust_list
8528 case TARGET_NR_set_robust_list:
8529 goto unimplemented_nowarn;
8530 #endif
8531
8532 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8533 case TARGET_NR_utimensat:
8534 {
8535 struct timespec *tsp, ts[2];
8536 if (!arg3) {
8537 tsp = NULL;
8538 } else {
8539 target_to_host_timespec(ts, arg3);
8540 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8541 tsp = ts;
8542 }
8543 if (!arg2)
8544 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8545 else {
8546 if (!(p = lock_user_string(arg2))) {
8547 ret = -TARGET_EFAULT;
8548 goto fail;
8549 }
8550 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8551 unlock_user(p, arg2, 0);
8552 }
8553 }
8554 break;
8555 #endif
8556 #if defined(CONFIG_USE_NPTL)
8557 case TARGET_NR_futex:
8558 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8559 break;
8560 #endif
8561 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8562 case TARGET_NR_inotify_init:
8563 ret = get_errno(sys_inotify_init());
8564 break;
8565 #endif
8566 #ifdef CONFIG_INOTIFY1
8567 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8568 case TARGET_NR_inotify_init1:
8569 ret = get_errno(sys_inotify_init1(arg1));
8570 break;
8571 #endif
8572 #endif
8573 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8574 case TARGET_NR_inotify_add_watch:
8575 p = lock_user_string(arg2);
8576 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8577 unlock_user(p, arg2, 0);
8578 break;
8579 #endif
8580 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8581 case TARGET_NR_inotify_rm_watch:
8582 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8583 break;
8584 #endif
8585
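/*
 * POSIX message queue syscalls.  These map fairly directly onto the
 * host's mq_* functions; the only conversions needed are for struct
 * mq_attr (copy_{from,to}_user_mq_attr) and for the optional absolute
 * timeout of the timed send/receive variants, which is translated with
 * target_to_host_timespec() before the call, e.g.:
 *
 *   struct timespec ts;
 *   target_to_host_timespec(&ts, arg5);              // guest -> host
 *   ret = get_errno(mq_timedsend(mqd, p, len, prio, &ts));
 */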
8586 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8587 case TARGET_NR_mq_open:
8588 {
8589 struct mq_attr posix_mq_attr;
8590
8591 p = lock_user_string(arg1 - 1);
8592 if (arg4 != 0)
8593 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8594 ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL)); /* NULL attr selects the default attributes */
8595 unlock_user (p, arg1, 0);
8596 }
8597 break;
8598
8599 case TARGET_NR_mq_unlink:
8600 p = lock_user_string(arg1 - 1);
8601 ret = get_errno(mq_unlink(p));
8602 unlock_user (p, arg1, 0);
8603 break;
8604
8605 case TARGET_NR_mq_timedsend:
8606 {
8607 struct timespec ts;
8608
8609 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8610 if (arg5 != 0) {
8611 target_to_host_timespec(&ts, arg5);
8612 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8613 host_to_target_timespec(arg5, &ts);
8614 }
8615 else
8616 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8617 unlock_user (p, arg2, arg3);
8618 }
8619 break;
8620
8621 case TARGET_NR_mq_timedreceive:
8622 {
8623 struct timespec ts;
8624 unsigned int prio;
8625
8626 p = lock_user (VERIFY_WRITE, arg2, arg3, 0); /* receive buffer: written by the host */
8627 if (arg5 != 0) {
8628 target_to_host_timespec(&ts, arg5);
8629 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8630 host_to_target_timespec(arg5, &ts);
8631 }
8632 else
8633 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8634 unlock_user (p, arg2, arg3);
8635 if (arg4 != 0)
8636 put_user_u32(prio, arg4);
8637 }
8638 break;
8639
8640 /* Not implemented for now... */
8641 /* case TARGET_NR_mq_notify: */
8642 /* break; */
8643
8644 case TARGET_NR_mq_getsetattr:
8645 {
8646 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8647 ret = 0;
8648 if (arg3 != 0) {
8649 ret = mq_getattr(arg1, &posix_mq_attr_out);
8650 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8651 }
8652 if (arg2 != 0) {
8653 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8654 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8655 }
8656
8657 }
8658 break;
8659 #endif
8660
8661 #ifdef CONFIG_SPLICE
8662 #ifdef TARGET_NR_tee
8663 case TARGET_NR_tee:
8664 {
8665 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8666 }
8667 break;
8668 #endif
8669 #ifdef TARGET_NR_splice
8670 case TARGET_NR_splice:
8671 {
8672 loff_t loff_in, loff_out;
8673 loff_t *ploff_in = NULL, *ploff_out = NULL;
8674 if(arg2) {
8675 get_user_u64(loff_in, arg2);
8676 ploff_in = &loff_in;
8677 }
8678 if(arg4) {
8679 get_user_u64(loff_out, arg4);
8680 ploff_out = &loff_out;
8681 }
8682 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8683 }
8684 break;
8685 #endif
8686 #ifdef TARGET_NR_vmsplice
8687 case TARGET_NR_vmsplice:
8688 {
8689 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8690 if (vec != NULL) {
8691 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8692 unlock_iovec(vec, arg2, arg3, 0);
8693 } else {
8694 ret = -host_to_target_errno(errno);
8695 }
8696 }
8697 break;
8698 #endif
8699 #endif /* CONFIG_SPLICE */
8700 #ifdef CONFIG_EVENTFD
8701 #if defined(TARGET_NR_eventfd)
8702 case TARGET_NR_eventfd:
8703 ret = get_errno(eventfd(arg1, 0));
8704 break;
8705 #endif
8706 #if defined(TARGET_NR_eventfd2)
8707 case TARGET_NR_eventfd2:
8708 ret = get_errno(eventfd(arg1, arg2));
8709 break;
8710 #endif
8711 #endif /* CONFIG_EVENTFD */
8712 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8713 case TARGET_NR_fallocate:
8714 #if TARGET_ABI_BITS == 32
8715 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8716 target_offset64(arg5, arg6)));
8717 #else
8718 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8719 #endif
8720 break;
8721 #endif
8722 #if defined(CONFIG_SYNC_FILE_RANGE)
8723 #if defined(TARGET_NR_sync_file_range)
8724 case TARGET_NR_sync_file_range:
8725 #if TARGET_ABI_BITS == 32
8726 #if defined(TARGET_MIPS)
8727 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8728 target_offset64(arg5, arg6), arg7));
8729 #else
8730 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8731 target_offset64(arg4, arg5), arg6));
8732 #endif /* !TARGET_MIPS */
8733 #else
8734 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8735 #endif
8736 break;
8737 #endif
8738 #if defined(TARGET_NR_sync_file_range2)
8739 case TARGET_NR_sync_file_range2:
8740 /* This is like sync_file_range but the arguments are reordered */
8741 #if TARGET_ABI_BITS == 32
8742 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8743 target_offset64(arg5, arg6), arg2));
8744 #else
8745 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8746 #endif
8747 break;
8748 #endif
8749 #endif
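/*
 * epoll syscalls.  struct epoll_event must be converted because the
 * guest may differ in endianness and structure packing: the events
 * mask is byte-swapped with tswap32() and the epoll_data_t union is
 * copied as an opaque 64-bit value, since the kernel never interprets
 * it.  For epoll_wait/epoll_pwait the guest's result array is locked
 * for writing and a temporary host array is used for the actual call:
 *
 *   ep = alloca(maxevents * sizeof(struct epoll_event));
 *   ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
 *   // then each ep[i] is swapped back into target_ep[i]
 */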
8750 #if defined(CONFIG_EPOLL)
8751 #if defined(TARGET_NR_epoll_create)
8752 case TARGET_NR_epoll_create:
8753 ret = get_errno(epoll_create(arg1));
8754 break;
8755 #endif
8756 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8757 case TARGET_NR_epoll_create1:
8758 ret = get_errno(epoll_create1(arg1));
8759 break;
8760 #endif
8761 #if defined(TARGET_NR_epoll_ctl)
8762 case TARGET_NR_epoll_ctl:
8763 {
8764 struct epoll_event ep;
8765 struct epoll_event *epp = 0;
8766 if (arg4) {
8767 struct target_epoll_event *target_ep;
8768 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8769 goto efault;
8770 }
8771 ep.events = tswap32(target_ep->events);
8772 /* The epoll_data_t union is just opaque data to the kernel,
8773 * so we transfer all 64 bits across and need not worry what
8774 * actual data type it is.
8775 */
8776 ep.data.u64 = tswap64(target_ep->data.u64);
8777 unlock_user_struct(target_ep, arg4, 0);
8778 epp = &ep;
8779 }
8780 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8781 break;
8782 }
8783 #endif
8784
8785 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8786 #define IMPLEMENT_EPOLL_PWAIT
8787 #endif
8788 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8789 #if defined(TARGET_NR_epoll_wait)
8790 case TARGET_NR_epoll_wait:
8791 #endif
8792 #if defined(IMPLEMENT_EPOLL_PWAIT)
8793 case TARGET_NR_epoll_pwait:
8794 #endif
8795 {
8796 struct target_epoll_event *target_ep;
8797 struct epoll_event *ep;
8798 int epfd = arg1;
8799 int maxevents = arg3;
8800 int timeout = arg4;
8801
8802 target_ep = lock_user(VERIFY_WRITE, arg2,
8803 maxevents * sizeof(struct target_epoll_event), 1);
8804 if (!target_ep) {
8805 goto efault;
8806 }
8807
8808 ep = alloca(maxevents * sizeof(struct epoll_event));
8809
8810 switch (num) {
8811 #if defined(IMPLEMENT_EPOLL_PWAIT)
8812 case TARGET_NR_epoll_pwait:
8813 {
8814 target_sigset_t *target_set;
8815 sigset_t _set, *set = &_set;
8816
8817 if (arg5) {
8818 target_set = lock_user(VERIFY_READ, arg5,
8819 sizeof(target_sigset_t), 1);
8820 if (!target_set) {
8821 unlock_user(target_ep, arg2, 0);
8822 goto efault;
8823 }
8824 target_to_host_sigset(set, target_set);
8825 unlock_user(target_set, arg5, 0);
8826 } else {
8827 set = NULL;
8828 }
8829
8830 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8831 break;
8832 }
8833 #endif
8834 #if defined(TARGET_NR_epoll_wait)
8835 case TARGET_NR_epoll_wait:
8836 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8837 break;
8838 #endif
8839 default:
8840 ret = -TARGET_ENOSYS;
8841 }
8842 if (!is_error(ret)) {
8843 int i;
8844 for (i = 0; i < ret; i++) {
8845 target_ep[i].events = tswap32(ep[i].events);
8846 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8847 }
8848 }
8849 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8850 break;
8851 }
8852 #endif
8853 #endif
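/*
 * TARGET_NR_prlimit64 uses a fixed-layout 64-bit rlimit structure on
 * both sides, so the conversion is just a tswap64() of rlim_cur and
 * rlim_max in each direction around the raw sys_prlimit64() call; a
 * NULL new-limit or old-limit pointer from the guest is passed through
 * to the host as NULL.
 */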
8854 #ifdef TARGET_NR_prlimit64
8855 case TARGET_NR_prlimit64:
8856 {
8857 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8858 struct target_rlimit64 *target_rnew, *target_rold;
8859 struct host_rlimit64 rnew, rold, *rnewp = 0;
8860 if (arg3) {
8861 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8862 goto efault;
8863 }
8864 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8865 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8866 unlock_user_struct(target_rnew, arg3, 0);
8867 rnewp = &rnew;
8868 }
8869
8870 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8871 if (!is_error(ret) && arg4) {
8872 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8873 goto efault;
8874 }
8875 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8876 target_rold->rlim_max = tswap64(rold.rlim_max);
8877 unlock_user_struct(target_rold, arg4, 1);
8878 }
8879 break;
8880 }
8881 #endif
8882 #ifdef TARGET_NR_gethostname
8883 case TARGET_NR_gethostname:
8884 {
8885 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8886 if (name) {
8887 ret = get_errno(gethostname(name, arg2));
8888 unlock_user(name, arg1, arg2);
8889 } else {
8890 ret = -TARGET_EFAULT;
8891 }
8892 break;
8893 }
8894 #endif
8895 default:
8896 unimplemented:
8897 gemu_log("qemu: Unsupported syscall: %d\n", num);
8898 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8899 unimplemented_nowarn:
8900 #endif
8901 ret = -TARGET_ENOSYS;
8902 break;
8903 }
8904 fail:
8905 #ifdef DEBUG
8906 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8907 #endif
8908 if(do_strace)
8909 print_syscall_ret(num, ret);
8910 return ret;
8911 efault:
8912 ret = -TARGET_EFAULT;
8913 goto fail;
8914 }