qemu.git / linux-user / syscall.c
linux-user: pass sockaddr from host to target
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include "qemu-common.h"
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
73 #ifdef CONFIG_ATTR
74 #include "qemu-xattr.h"
75 #endif
76
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
83
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/utsname.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include "linux_loop.h"
100 #include "cpu-uname.h"
101
102 #include "qemu.h"
103
104 #if defined(CONFIG_USE_NPTL)
105 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
106 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
107 #else
108 /* XXX: Hardcode the above values. */
109 #define CLONE_NPTL_FLAGS2 0
110 #endif
111
112 //#define DEBUG
113
114 //#include <linux/msdos_fs.h>
115 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
116 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
117
118
119 #undef _syscall0
120 #undef _syscall1
121 #undef _syscall2
122 #undef _syscall3
123 #undef _syscall4
124 #undef _syscall5
125 #undef _syscall6
126
127 #define _syscall0(type,name) \
128 static type name (void) \
129 { \
130 return syscall(__NR_##name); \
131 }
132
133 #define _syscall1(type,name,type1,arg1) \
134 static type name (type1 arg1) \
135 { \
136 return syscall(__NR_##name, arg1); \
137 }
138
139 #define _syscall2(type,name,type1,arg1,type2,arg2) \
140 static type name (type1 arg1,type2 arg2) \
141 { \
142 return syscall(__NR_##name, arg1, arg2); \
143 }
144
145 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
146 static type name (type1 arg1,type2 arg2,type3 arg3) \
147 { \
148 return syscall(__NR_##name, arg1, arg2, arg3); \
149 }
150
151 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
152 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
153 { \
154 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
155 }
156
157 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
158 type5,arg5) \
159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
160 { \
161 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
162 }
163
164
165 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
166 type5,arg5,type6,arg6) \
167 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
168 type6 arg6) \
169 { \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
171 }
172
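/*
 * For illustration, a definition such as
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count);
 *
 * expands into a static wrapper that forwards straight to the raw host
 * syscall:
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * so each sys_* wrapper behaves like a libc stub: it returns -1 and sets
 * the host errno on failure, which get_errno() further below converts
 * into a target errno.
 */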
173
174 #define __NR_sys_uname __NR_uname
175 #define __NR_sys_faccessat __NR_faccessat
176 #define __NR_sys_fchmodat __NR_fchmodat
177 #define __NR_sys_fchownat __NR_fchownat
178 #define __NR_sys_fstatat64 __NR_fstatat64
179 #define __NR_sys_futimesat __NR_futimesat
180 #define __NR_sys_getcwd1 __NR_getcwd
181 #define __NR_sys_getdents __NR_getdents
182 #define __NR_sys_getdents64 __NR_getdents64
183 #define __NR_sys_getpriority __NR_getpriority
184 #define __NR_sys_linkat __NR_linkat
185 #define __NR_sys_mkdirat __NR_mkdirat
186 #define __NR_sys_mknodat __NR_mknodat
187 #define __NR_sys_newfstatat __NR_newfstatat
188 #define __NR_sys_openat __NR_openat
189 #define __NR_sys_readlinkat __NR_readlinkat
190 #define __NR_sys_renameat __NR_renameat
191 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
192 #define __NR_sys_symlinkat __NR_symlinkat
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_tgkill __NR_tgkill
195 #define __NR_sys_tkill __NR_tkill
196 #define __NR_sys_unlinkat __NR_unlinkat
197 #define __NR_sys_utimensat __NR_utimensat
198 #define __NR_sys_futex __NR_futex
199 #define __NR_sys_inotify_init __NR_inotify_init
200 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
201 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
202
203 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
204 defined(__s390x__)
205 #define __NR__llseek __NR_lseek
206 #endif
207
208 #ifdef __NR_gettid
209 _syscall0(int, gettid)
210 #else
211 /* This is a replacement for the host gettid() and must return a host
212 errno. */
213 static int gettid(void) {
214 return -ENOSYS;
215 }
216 #endif
217 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
218 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
220 #endif
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
223 loff_t *, res, uint, wh);
224 #endif
225 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
226 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
227 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
228 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
229 #endif
230 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
231 _syscall2(int,sys_tkill,int,tid,int,sig)
232 #endif
233 #ifdef __NR_exit_group
234 _syscall1(int,exit_group,int,error_code)
235 #endif
236 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
237 _syscall1(int,set_tid_address,int *,tidptr)
238 #endif
239 #if defined(CONFIG_USE_NPTL)
240 #if defined(TARGET_NR_futex) && defined(__NR_futex)
241 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
242 const struct timespec *,timeout,int *,uaddr2,int,val3)
243 #endif
244 #endif
245 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
246 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
247 unsigned long *, user_mask_ptr);
248 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
249 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
250 unsigned long *, user_mask_ptr);
251 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
252 void *, arg);
253
254 static bitmask_transtbl fcntl_flags_tbl[] = {
255 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
256 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
257 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
258 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
259 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
260 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
261 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
262 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
263 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
264 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
265 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
266 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
267 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
268 #if defined(O_DIRECT)
269 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
270 #endif
271 #if defined(O_NOATIME)
272 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
273 #endif
274 #if defined(O_CLOEXEC)
275 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
276 #endif
277 #if defined(O_PATH)
278 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
279 #endif
280 /* Don't terminate the list prematurely on 64-bit host+guest. */
281 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
282 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
283 #endif
284 { 0, 0, 0, 0 }
285 };
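/*
 * A sketch of how a bitmask_transtbl like the one above is typically
 * walked (field names here are illustrative; the real struct definition
 * lives in qemu.h): an entry contributes its host bits whenever the
 * masked target value matches its target bits.
 *
 *     unsigned int host_flags = 0;
 *     const bitmask_transtbl *e;
 *
 *     for (e = fcntl_flags_tbl; e->target_mask; e++) {
 *         if ((target_flags & e->target_mask) == e->target_bits) {
 *             host_flags |= e->host_bits;
 *         }
 *     }
 *
 * This is why O_ACCMODE needs separate rows for O_WRONLY and O_RDWR (a
 * two-bit field compared under a mask) while single-bit flags use the
 * same value for both mask and bits, and why an all-zero O_LARGEFILE
 * row (the 64-bit case guarded above) must not be emitted: it would look
 * exactly like the terminating entry.
 */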
286
287 #define COPY_UTSNAME_FIELD(dest, src) \
288 do { \
289 /* __NEW_UTS_LEN doesn't include terminating null */ \
290 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
291 (dest)[__NEW_UTS_LEN] = '\0'; \
292 } while (0)
293
294 static int sys_uname(struct new_utsname *buf)
295 {
296 struct utsname uts_buf;
297
298 if (uname(&uts_buf) < 0)
299 return (-1);
300
301 /*
302 * Just in case these have some differences, we
303 * translate utsname to new_utsname (which is the
304      * struct the Linux kernel uses).
305 */
306
307 memset(buf, 0, sizeof(*buf));
308 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
309 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
310 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
311 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
312 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
313 #ifdef _GNU_SOURCE
314 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
315 #endif
316 return (0);
317
318 #undef COPY_UTSNAME_FIELD
319 }
320
321 static int sys_getcwd1(char *buf, size_t size)
322 {
323 if (getcwd(buf, size) == NULL) {
324 /* getcwd() sets errno */
325 return (-1);
326 }
327 return strlen(buf)+1;
328 }
329
330 #ifdef CONFIG_ATFILE
331 /*
332  * The host system appears to provide the *at() syscall stubs. We
333  * enable them one by one as specified by the target's syscall_nr.h.
334 */
335
336 #ifdef TARGET_NR_faccessat
337 static int sys_faccessat(int dirfd, const char *pathname, int mode)
338 {
339 return (faccessat(dirfd, pathname, mode, 0));
340 }
341 #endif
342 #ifdef TARGET_NR_fchmodat
343 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
344 {
345 return (fchmodat(dirfd, pathname, mode, 0));
346 }
347 #endif
348 #if defined(TARGET_NR_fchownat)
349 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
350 gid_t group, int flags)
351 {
352 return (fchownat(dirfd, pathname, owner, group, flags));
353 }
354 #endif
355 #ifdef __NR_fstatat64
356 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
357 int flags)
358 {
359 return (fstatat(dirfd, pathname, buf, flags));
360 }
361 #endif
362 #ifdef __NR_newfstatat
363 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
364 int flags)
365 {
366 return (fstatat(dirfd, pathname, buf, flags));
367 }
368 #endif
369 #ifdef TARGET_NR_futimesat
370 static int sys_futimesat(int dirfd, const char *pathname,
371 const struct timeval times[2])
372 {
373 return (futimesat(dirfd, pathname, times));
374 }
375 #endif
376 #ifdef TARGET_NR_linkat
377 static int sys_linkat(int olddirfd, const char *oldpath,
378 int newdirfd, const char *newpath, int flags)
379 {
380 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
381 }
382 #endif
383 #ifdef TARGET_NR_mkdirat
384 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
385 {
386 return (mkdirat(dirfd, pathname, mode));
387 }
388 #endif
389 #ifdef TARGET_NR_mknodat
390 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
391 dev_t dev)
392 {
393 return (mknodat(dirfd, pathname, mode, dev));
394 }
395 #endif
396 #ifdef TARGET_NR_openat
397 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
398 {
399 /*
400      * open(2) takes an extra 'mode' parameter when called with
401      * the O_CREAT flag.
402 */
403 if ((flags & O_CREAT) != 0) {
404 return (openat(dirfd, pathname, flags, mode));
405 }
406 return (openat(dirfd, pathname, flags));
407 }
408 #endif
409 #ifdef TARGET_NR_readlinkat
410 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
411 {
412 return (readlinkat(dirfd, pathname, buf, bufsiz));
413 }
414 #endif
415 #ifdef TARGET_NR_renameat
416 static int sys_renameat(int olddirfd, const char *oldpath,
417 int newdirfd, const char *newpath)
418 {
419 return (renameat(olddirfd, oldpath, newdirfd, newpath));
420 }
421 #endif
422 #ifdef TARGET_NR_symlinkat
423 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
424 {
425 return (symlinkat(oldpath, newdirfd, newpath));
426 }
427 #endif
428 #ifdef TARGET_NR_unlinkat
429 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
430 {
431 return (unlinkat(dirfd, pathname, flags));
432 }
433 #endif
434 #else /* !CONFIG_ATFILE */
435
436 /*
437 * Try direct syscalls instead
438 */
439 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
440 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
441 #endif
442 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
443 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
444 #endif
445 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
446 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
447 uid_t,owner,gid_t,group,int,flags)
448 #endif
449 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
450 defined(__NR_fstatat64)
451 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
452 struct stat *,buf,int,flags)
453 #endif
454 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
455 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
456 const struct timeval *,times)
457 #endif
458 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
459 defined(__NR_newfstatat)
460 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
461 struct stat *,buf,int,flags)
462 #endif
463 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
464 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
465 int,newdirfd,const char *,newpath,int,flags)
466 #endif
467 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
468 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
469 #endif
470 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
471 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
472 mode_t,mode,dev_t,dev)
473 #endif
474 #if defined(TARGET_NR_openat) && defined(__NR_openat)
475 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
476 #endif
477 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
478 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
479 char *,buf,size_t,bufsize)
480 #endif
481 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
482 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
483 int,newdirfd,const char *,newpath)
484 #endif
485 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
486 _syscall3(int,sys_symlinkat,const char *,oldpath,
487 int,newdirfd,const char *,newpath)
488 #endif
489 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
490 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
491 #endif
492
493 #endif /* CONFIG_ATFILE */
494
495 #ifdef CONFIG_UTIMENSAT
496 static int sys_utimensat(int dirfd, const char *pathname,
497 const struct timespec times[2], int flags)
498 {
499 if (pathname == NULL)
500 return futimens(dirfd, times);
501 else
502 return utimensat(dirfd, pathname, times, flags);
503 }
504 #else
505 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
506 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
507 const struct timespec *,tsp,int,flags)
508 #endif
509 #endif /* CONFIG_UTIMENSAT */
510
511 #ifdef CONFIG_INOTIFY
512 #include <sys/inotify.h>
513
514 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
515 static int sys_inotify_init(void)
516 {
517 return (inotify_init());
518 }
519 #endif
520 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
521 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
522 {
523 return (inotify_add_watch(fd, pathname, mask));
524 }
525 #endif
526 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
527 static int sys_inotify_rm_watch(int fd, int32_t wd)
528 {
529 return (inotify_rm_watch(fd, wd));
530 }
531 #endif
532 #ifdef CONFIG_INOTIFY1
533 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
534 static int sys_inotify_init1(int flags)
535 {
536 return (inotify_init1(flags));
537 }
538 #endif
539 #endif
540 #else
541 /* Userspace can usually survive runtime without inotify */
542 #undef TARGET_NR_inotify_init
543 #undef TARGET_NR_inotify_init1
544 #undef TARGET_NR_inotify_add_watch
545 #undef TARGET_NR_inotify_rm_watch
546 #endif /* CONFIG_INOTIFY */
547
548 #if defined(TARGET_NR_ppoll)
549 #ifndef __NR_ppoll
550 # define __NR_ppoll -1
551 #endif
552 #define __NR_sys_ppoll __NR_ppoll
553 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
554 struct timespec *, timeout, const __sigset_t *, sigmask,
555 size_t, sigsetsize)
556 #endif
557
558 #if defined(TARGET_NR_pselect6)
559 #ifndef __NR_pselect6
560 # define __NR_pselect6 -1
561 #endif
562 #define __NR_sys_pselect6 __NR_pselect6
563 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
564 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
565 #endif
566
567 #if defined(TARGET_NR_prlimit64)
568 #ifndef __NR_prlimit64
569 # define __NR_prlimit64 -1
570 #endif
571 #define __NR_sys_prlimit64 __NR_prlimit64
572 /* The glibc rlimit structure may not match the one used by the underlying syscall */
573 struct host_rlimit64 {
574 uint64_t rlim_cur;
575 uint64_t rlim_max;
576 };
577 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
578 const struct host_rlimit64 *, new_limit,
579 struct host_rlimit64 *, old_limit)
580 #endif
581
582 extern int personality(int);
583 extern int flock(int, int);
584 extern int setfsuid(int);
585 extern int setfsgid(int);
586 extern int setgroups(int, gid_t *);
587
588 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
589 #ifdef TARGET_ARM
590 static inline int regpairs_aligned(void *cpu_env) {
591 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
592 }
593 #elif defined(TARGET_MIPS)
594 static inline int regpairs_aligned(void *cpu_env) { return 1; }
595 #else
596 static inline int regpairs_aligned(void *cpu_env) { return 0; }
597 #endif
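/*
 * Concretely: on 32-bit ABIs a 64-bit syscall argument is passed in two
 * 32-bit registers, and ARM EABI and MIPS require that pair to start in
 * an even-numbered register, so the kernel ABI inserts a padding argument
 * before it (e.g. ftruncate64 arrives as fd, pad, len_lo, len_hi). The
 * syscall dispatch below uses regpairs_aligned() roughly like this sketch
 * (not the exact call sites) to skip the padding slot before reassembling
 * the 64-bit value:
 *
 *     if (regpairs_aligned(cpu_env)) {
 *         arg2 = arg3;    // skip the padding register
 *         arg3 = arg4;
 *     }
 */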
598
599 #define ERRNO_TABLE_SIZE 1200
600
601 /* target_to_host_errno_table[] is initialized from
602 * host_to_target_errno_table[] in syscall_init(). */
603 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
604 };
605
606 /*
607 * This list is the union of errno values overridden in asm-<arch>/errno.h
608 * minus the errnos that are not actually generic to all archs.
609 */
610 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
611 [EIDRM] = TARGET_EIDRM,
612 [ECHRNG] = TARGET_ECHRNG,
613 [EL2NSYNC] = TARGET_EL2NSYNC,
614 [EL3HLT] = TARGET_EL3HLT,
615 [EL3RST] = TARGET_EL3RST,
616 [ELNRNG] = TARGET_ELNRNG,
617 [EUNATCH] = TARGET_EUNATCH,
618 [ENOCSI] = TARGET_ENOCSI,
619 [EL2HLT] = TARGET_EL2HLT,
620 [EDEADLK] = TARGET_EDEADLK,
621 [ENOLCK] = TARGET_ENOLCK,
622 [EBADE] = TARGET_EBADE,
623 [EBADR] = TARGET_EBADR,
624 [EXFULL] = TARGET_EXFULL,
625 [ENOANO] = TARGET_ENOANO,
626 [EBADRQC] = TARGET_EBADRQC,
627 [EBADSLT] = TARGET_EBADSLT,
628 [EBFONT] = TARGET_EBFONT,
629 [ENOSTR] = TARGET_ENOSTR,
630 [ENODATA] = TARGET_ENODATA,
631 [ETIME] = TARGET_ETIME,
632 [ENOSR] = TARGET_ENOSR,
633 [ENONET] = TARGET_ENONET,
634 [ENOPKG] = TARGET_ENOPKG,
635 [EREMOTE] = TARGET_EREMOTE,
636 [ENOLINK] = TARGET_ENOLINK,
637 [EADV] = TARGET_EADV,
638 [ESRMNT] = TARGET_ESRMNT,
639 [ECOMM] = TARGET_ECOMM,
640 [EPROTO] = TARGET_EPROTO,
641 [EDOTDOT] = TARGET_EDOTDOT,
642 [EMULTIHOP] = TARGET_EMULTIHOP,
643 [EBADMSG] = TARGET_EBADMSG,
644 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
645 [EOVERFLOW] = TARGET_EOVERFLOW,
646 [ENOTUNIQ] = TARGET_ENOTUNIQ,
647 [EBADFD] = TARGET_EBADFD,
648 [EREMCHG] = TARGET_EREMCHG,
649 [ELIBACC] = TARGET_ELIBACC,
650 [ELIBBAD] = TARGET_ELIBBAD,
651 [ELIBSCN] = TARGET_ELIBSCN,
652 [ELIBMAX] = TARGET_ELIBMAX,
653 [ELIBEXEC] = TARGET_ELIBEXEC,
654 [EILSEQ] = TARGET_EILSEQ,
655 [ENOSYS] = TARGET_ENOSYS,
656 [ELOOP] = TARGET_ELOOP,
657 [ERESTART] = TARGET_ERESTART,
658 [ESTRPIPE] = TARGET_ESTRPIPE,
659 [ENOTEMPTY] = TARGET_ENOTEMPTY,
660 [EUSERS] = TARGET_EUSERS,
661 [ENOTSOCK] = TARGET_ENOTSOCK,
662 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
663 [EMSGSIZE] = TARGET_EMSGSIZE,
664 [EPROTOTYPE] = TARGET_EPROTOTYPE,
665 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
666 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
667 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
668 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
669 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
670 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
671 [EADDRINUSE] = TARGET_EADDRINUSE,
672 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
673 [ENETDOWN] = TARGET_ENETDOWN,
674 [ENETUNREACH] = TARGET_ENETUNREACH,
675 [ENETRESET] = TARGET_ENETRESET,
676 [ECONNABORTED] = TARGET_ECONNABORTED,
677 [ECONNRESET] = TARGET_ECONNRESET,
678 [ENOBUFS] = TARGET_ENOBUFS,
679 [EISCONN] = TARGET_EISCONN,
680 [ENOTCONN] = TARGET_ENOTCONN,
681 [EUCLEAN] = TARGET_EUCLEAN,
682 [ENOTNAM] = TARGET_ENOTNAM,
683 [ENAVAIL] = TARGET_ENAVAIL,
684 [EISNAM] = TARGET_EISNAM,
685 [EREMOTEIO] = TARGET_EREMOTEIO,
686 [ESHUTDOWN] = TARGET_ESHUTDOWN,
687 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
688 [ETIMEDOUT] = TARGET_ETIMEDOUT,
689 [ECONNREFUSED] = TARGET_ECONNREFUSED,
690 [EHOSTDOWN] = TARGET_EHOSTDOWN,
691 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
692 [EALREADY] = TARGET_EALREADY,
693 [EINPROGRESS] = TARGET_EINPROGRESS,
694 [ESTALE] = TARGET_ESTALE,
695 [ECANCELED] = TARGET_ECANCELED,
696 [ENOMEDIUM] = TARGET_ENOMEDIUM,
697 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
698 #ifdef ENOKEY
699 [ENOKEY] = TARGET_ENOKEY,
700 #endif
701 #ifdef EKEYEXPIRED
702 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
703 #endif
704 #ifdef EKEYREVOKED
705 [EKEYREVOKED] = TARGET_EKEYREVOKED,
706 #endif
707 #ifdef EKEYREJECTED
708 [EKEYREJECTED] = TARGET_EKEYREJECTED,
709 #endif
710 #ifdef EOWNERDEAD
711 [EOWNERDEAD] = TARGET_EOWNERDEAD,
712 #endif
713 #ifdef ENOTRECOVERABLE
714 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
715 #endif
716 };
717
718 static inline int host_to_target_errno(int err)
719 {
720 if(host_to_target_errno_table[err])
721 return host_to_target_errno_table[err];
722 return err;
723 }
724
725 static inline int target_to_host_errno(int err)
726 {
727 if (target_to_host_errno_table[err])
728 return target_to_host_errno_table[err];
729 return err;
730 }
731
732 static inline abi_long get_errno(abi_long ret)
733 {
734 if (ret == -1)
735 return -host_to_target_errno(errno);
736 else
737 return ret;
738 }
739
740 static inline int is_error(abi_long ret)
741 {
742 return (abi_ulong)ret >= (abi_ulong)(-4096);
743 }
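/*
 * This mirrors the kernel convention that no successful syscall result,
 * not even a pointer returned by mmap or brk, ever falls in the top 4095
 * values of the address space: a return of, say, -TARGET_ENOENT (-2) sits
 * inside the [-4095, -1] window and is reported as an error, while a
 * large mapping address just below that window still counts as success.
 * Typical usage in this file looks like:
 *
 *     abi_long ret = get_errno(open(path, flags));
 *     if (is_error(ret)) {
 *         return ret;        // already a negative target errno
 *     }
 */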
744
745 char *target_strerror(int err)
746 {
747 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
748 return NULL;
749 }
750 return strerror(target_to_host_errno(err));
751 }
752
753 static abi_ulong target_brk;
754 static abi_ulong target_original_brk;
755 static abi_ulong brk_page;
756
757 void target_set_brk(abi_ulong new_brk)
758 {
759 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
760 brk_page = HOST_PAGE_ALIGN(target_brk);
761 }
762
763 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
764 #define DEBUGF_BRK(message, args...)
765
766 /* do_brk() must return target values and target errnos. */
767 abi_long do_brk(abi_ulong new_brk)
768 {
769 abi_long mapped_addr;
770 int new_alloc_size;
771
772 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
773
774 if (!new_brk) {
775 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
776 return target_brk;
777 }
778 if (new_brk < target_original_brk) {
779 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
780 target_brk);
781 return target_brk;
782 }
783
784     /* If the new brk is less than the highest page reserved for the
785      * target heap allocation, set it and we're almost done... */
786 if (new_brk <= brk_page) {
787 /* Heap contents are initialized to zero, as for anonymous
788 * mapped pages. */
789 if (new_brk > target_brk) {
790 memset(g2h(target_brk), 0, new_brk - target_brk);
791 }
792 target_brk = new_brk;
793 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
794 return target_brk;
795 }
796
797 /* We need to allocate more memory after the brk... Note that
798 * we don't use MAP_FIXED because that will map over the top of
799 * any existing mapping (like the one with the host libc or qemu
800 * itself); instead we treat "mapped but at wrong address" as
801 * a failure and unmap again.
802 */
803 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
804 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
805 PROT_READ|PROT_WRITE,
806 MAP_ANON|MAP_PRIVATE, 0, 0));
807
808 if (mapped_addr == brk_page) {
809 /* Heap contents are initialized to zero, as for anonymous
810 * mapped pages. Technically the new pages are already
811 * initialized to zero since they *are* anonymous mapped
812 * pages, however we have to take care with the contents that
813          * come from the remaining part of the previous page: it may
814          * contain garbage data left over from previous heap usage (grown
815          * then shrunk). */
816 memset(g2h(target_brk), 0, brk_page - target_brk);
817
818 target_brk = new_brk;
819 brk_page = HOST_PAGE_ALIGN(target_brk);
820 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
821 target_brk);
822 return target_brk;
823 } else if (mapped_addr != -1) {
824 /* Mapped but at wrong address, meaning there wasn't actually
825 * enough space for this brk.
826 */
827 target_munmap(mapped_addr, new_alloc_size);
828 mapped_addr = -1;
829 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
830 }
831 else {
832 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
833 }
834
835 #if defined(TARGET_ALPHA)
836 /* We (partially) emulate OSF/1 on Alpha, which requires we
837 return a proper errno, not an unchanged brk value. */
838 return -TARGET_ENOMEM;
839 #endif
840 /* For everything else, return the previous break. */
841 return target_brk;
842 }
843
844 static inline abi_long copy_from_user_fdset(fd_set *fds,
845 abi_ulong target_fds_addr,
846 int n)
847 {
848 int i, nw, j, k;
849 abi_ulong b, *target_fds;
850
851 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
852 if (!(target_fds = lock_user(VERIFY_READ,
853 target_fds_addr,
854 sizeof(abi_ulong) * nw,
855 1)))
856 return -TARGET_EFAULT;
857
858 FD_ZERO(fds);
859 k = 0;
860 for (i = 0; i < nw; i++) {
861 /* grab the abi_ulong */
862 __get_user(b, &target_fds[i]);
863 for (j = 0; j < TARGET_ABI_BITS; j++) {
864 /* check the bit inside the abi_ulong */
865 if ((b >> j) & 1)
866 FD_SET(k, fds);
867 k++;
868 }
869 }
870
871 unlock_user(target_fds, target_fds_addr, 0);
872
873 return 0;
874 }
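/*
 * Worked example: with TARGET_ABI_BITS == 32 and n == 100 descriptors,
 * nw == 4 abi_ulong words are locked and copied; bit j of word i (after
 * __get_user has converted the word to host byte order) corresponds to
 * descriptor i * 32 + j, which is then set in the host fd_set with
 * FD_SET. copy_to_user_fdset() below performs the inverse packing.
 */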
875
876 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
877 abi_ulong target_fds_addr,
878 int n)
879 {
880 if (target_fds_addr) {
881 if (copy_from_user_fdset(fds, target_fds_addr, n))
882 return -TARGET_EFAULT;
883 *fds_ptr = fds;
884 } else {
885 *fds_ptr = NULL;
886 }
887 return 0;
888 }
889
890 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
891 const fd_set *fds,
892 int n)
893 {
894 int i, nw, j, k;
895 abi_long v;
896 abi_ulong *target_fds;
897
898 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
899 if (!(target_fds = lock_user(VERIFY_WRITE,
900 target_fds_addr,
901 sizeof(abi_ulong) * nw,
902 0)))
903 return -TARGET_EFAULT;
904
905 k = 0;
906 for (i = 0; i < nw; i++) {
907 v = 0;
908 for (j = 0; j < TARGET_ABI_BITS; j++) {
909 v |= ((FD_ISSET(k, fds) != 0) << j);
910 k++;
911 }
912 __put_user(v, &target_fds[i]);
913 }
914
915 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
916
917 return 0;
918 }
919
920 #if defined(__alpha__)
921 #define HOST_HZ 1024
922 #else
923 #define HOST_HZ 100
924 #endif
925
926 static inline abi_long host_to_target_clock_t(long ticks)
927 {
928 #if HOST_HZ == TARGET_HZ
929 return ticks;
930 #else
931 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
932 #endif
933 }
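/*
 * Example: times() reports CPU time in host clock ticks. With HOST_HZ at
 * 100 and a target whose TARGET_HZ is 1024 (the rate Alpha uses, per the
 * HOST_HZ definition above), 250 host ticks become
 * (250 * 1024) / 100 = 2560 target ticks; when the two rates match, the
 * value passes through unchanged.
 */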
934
935 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
936 const struct rusage *rusage)
937 {
938 struct target_rusage *target_rusage;
939
940 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
941 return -TARGET_EFAULT;
942 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
943 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
944 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
945 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
946 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
947 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
948 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
949 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
950 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
951 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
952 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
953 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
954 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
955 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
956 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
957 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
958 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
959 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
960 unlock_user_struct(target_rusage, target_addr, 1);
961
962 return 0;
963 }
964
965 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
966 {
967 abi_ulong target_rlim_swap;
968 rlim_t result;
969
970 target_rlim_swap = tswapal(target_rlim);
971 if (target_rlim_swap == TARGET_RLIM_INFINITY)
972 return RLIM_INFINITY;
973
974 result = target_rlim_swap;
975 if (target_rlim_swap != (rlim_t)result)
976 return RLIM_INFINITY;
977
978 return result;
979 }
980
981 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
982 {
983 abi_ulong target_rlim_swap;
984 abi_ulong result;
985
986 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
987 target_rlim_swap = TARGET_RLIM_INFINITY;
988 else
989 target_rlim_swap = rlim;
990 result = tswapal(target_rlim_swap);
991
992 return result;
993 }
994
995 static inline int target_to_host_resource(int code)
996 {
997 switch (code) {
998 case TARGET_RLIMIT_AS:
999 return RLIMIT_AS;
1000 case TARGET_RLIMIT_CORE:
1001 return RLIMIT_CORE;
1002 case TARGET_RLIMIT_CPU:
1003 return RLIMIT_CPU;
1004 case TARGET_RLIMIT_DATA:
1005 return RLIMIT_DATA;
1006 case TARGET_RLIMIT_FSIZE:
1007 return RLIMIT_FSIZE;
1008 case TARGET_RLIMIT_LOCKS:
1009 return RLIMIT_LOCKS;
1010 case TARGET_RLIMIT_MEMLOCK:
1011 return RLIMIT_MEMLOCK;
1012 case TARGET_RLIMIT_MSGQUEUE:
1013 return RLIMIT_MSGQUEUE;
1014 case TARGET_RLIMIT_NICE:
1015 return RLIMIT_NICE;
1016 case TARGET_RLIMIT_NOFILE:
1017 return RLIMIT_NOFILE;
1018 case TARGET_RLIMIT_NPROC:
1019 return RLIMIT_NPROC;
1020 case TARGET_RLIMIT_RSS:
1021 return RLIMIT_RSS;
1022 case TARGET_RLIMIT_RTPRIO:
1023 return RLIMIT_RTPRIO;
1024 case TARGET_RLIMIT_SIGPENDING:
1025 return RLIMIT_SIGPENDING;
1026 case TARGET_RLIMIT_STACK:
1027 return RLIMIT_STACK;
1028 default:
1029 return code;
1030 }
1031 }
1032
1033 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1034 abi_ulong target_tv_addr)
1035 {
1036 struct target_timeval *target_tv;
1037
1038 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1039 return -TARGET_EFAULT;
1040
1041 __get_user(tv->tv_sec, &target_tv->tv_sec);
1042 __get_user(tv->tv_usec, &target_tv->tv_usec);
1043
1044 unlock_user_struct(target_tv, target_tv_addr, 0);
1045
1046 return 0;
1047 }
1048
1049 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1050 const struct timeval *tv)
1051 {
1052 struct target_timeval *target_tv;
1053
1054 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1055 return -TARGET_EFAULT;
1056
1057 __put_user(tv->tv_sec, &target_tv->tv_sec);
1058 __put_user(tv->tv_usec, &target_tv->tv_usec);
1059
1060 unlock_user_struct(target_tv, target_tv_addr, 1);
1061
1062 return 0;
1063 }
1064
1065 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1066 #include <mqueue.h>
1067
1068 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1069 abi_ulong target_mq_attr_addr)
1070 {
1071 struct target_mq_attr *target_mq_attr;
1072
1073 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1074 target_mq_attr_addr, 1))
1075 return -TARGET_EFAULT;
1076
1077 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1078 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1079 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1080 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1081
1082 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1083
1084 return 0;
1085 }
1086
1087 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1088 const struct mq_attr *attr)
1089 {
1090 struct target_mq_attr *target_mq_attr;
1091
1092 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1093 target_mq_attr_addr, 0))
1094 return -TARGET_EFAULT;
1095
1096 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1097 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1098 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1099 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1100
1101 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1102
1103 return 0;
1104 }
1105 #endif
1106
1107 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1108 /* do_select() must return target values and target errnos. */
1109 static abi_long do_select(int n,
1110 abi_ulong rfd_addr, abi_ulong wfd_addr,
1111 abi_ulong efd_addr, abi_ulong target_tv_addr)
1112 {
1113 fd_set rfds, wfds, efds;
1114 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1115 struct timeval tv, *tv_ptr;
1116 abi_long ret;
1117
1118 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1119 if (ret) {
1120 return ret;
1121 }
1122 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1123 if (ret) {
1124 return ret;
1125 }
1126 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1127 if (ret) {
1128 return ret;
1129 }
1130
1131 if (target_tv_addr) {
1132 if (copy_from_user_timeval(&tv, target_tv_addr))
1133 return -TARGET_EFAULT;
1134 tv_ptr = &tv;
1135 } else {
1136 tv_ptr = NULL;
1137 }
1138
1139 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1140
1141 if (!is_error(ret)) {
1142 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1143 return -TARGET_EFAULT;
1144 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1145 return -TARGET_EFAULT;
1146 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1147 return -TARGET_EFAULT;
1148
1149 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1150 return -TARGET_EFAULT;
1151 }
1152
1153 return ret;
1154 }
1155 #endif
1156
1157 static abi_long do_pipe2(int host_pipe[], int flags)
1158 {
1159 #ifdef CONFIG_PIPE2
1160 return pipe2(host_pipe, flags);
1161 #else
1162 return -ENOSYS;
1163 #endif
1164 }
1165
1166 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1167 int flags, int is_pipe2)
1168 {
1169 int host_pipe[2];
1170 abi_long ret;
1171 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1172
1173 if (is_error(ret))
1174 return get_errno(ret);
1175
1176 /* Several targets have special calling conventions for the original
1177 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1178 if (!is_pipe2) {
1179 #if defined(TARGET_ALPHA)
1180 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1181 return host_pipe[0];
1182 #elif defined(TARGET_MIPS)
1183 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1184 return host_pipe[0];
1185 #elif defined(TARGET_SH4)
1186 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1187 return host_pipe[0];
1188 #endif
1189 }
1190
1191 if (put_user_s32(host_pipe[0], pipedes)
1192 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1193 return -TARGET_EFAULT;
1194 return get_errno(ret);
1195 }
1196
1197 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1198 abi_ulong target_addr,
1199 socklen_t len)
1200 {
1201 struct target_ip_mreqn *target_smreqn;
1202
1203 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1204 if (!target_smreqn)
1205 return -TARGET_EFAULT;
1206 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1207 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1208 if (len == sizeof(struct target_ip_mreqn))
1209 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1210 unlock_user(target_smreqn, target_addr, 0);
1211
1212 return 0;
1213 }
1214
1215 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1216 abi_ulong target_addr,
1217 socklen_t len)
1218 {
1219 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1220 sa_family_t sa_family;
1221 struct target_sockaddr *target_saddr;
1222
1223 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1224 if (!target_saddr)
1225 return -TARGET_EFAULT;
1226
1227 sa_family = tswap16(target_saddr->sa_family);
1228
1229     /* Oops. The caller might send an incomplete sun_path; sun_path
1230 * must be terminated by \0 (see the manual page), but
1231 * unfortunately it is quite common to specify sockaddr_un
1232 * length as "strlen(x->sun_path)" while it should be
1233 * "strlen(...) + 1". We'll fix that here if needed.
1234      * The Linux kernel applies a similar fixup.
1235 */
1236
1237 if (sa_family == AF_UNIX) {
1238 if (len < unix_maxlen && len > 0) {
1239 char *cp = (char*)target_saddr;
1240
1241 if ( cp[len-1] && !cp[len] )
1242 len++;
1243 }
1244 if (len > unix_maxlen)
1245 len = unix_maxlen;
1246 }
1247
1248 memcpy(addr, target_saddr, len);
1249 addr->sa_family = sa_family;
1250 unlock_user(target_saddr, target_addr, 0);
1251
1252 return 0;
1253 }
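/*
 * Example of the AF_UNIX fixup above: a guest that binds to a
 * hypothetical path "/tmp/sock" and passes
 * addrlen = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock"),
 * i.e. without counting the trailing NUL, hits the cp[len-1] && !cp[len]
 * test: the last supplied byte is 'k' and the byte after it is already 0
 * (as it normally is when the structure was zero-initialized), so len is
 * bumped by one and the host kernel receives a properly terminated
 * sun_path, just as the Linux kernel itself would tolerate.
 */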
1254
1255 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1256 struct sockaddr *addr,
1257 socklen_t len)
1258 {
1259 struct target_sockaddr *target_saddr;
1260
1261 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1262 if (!target_saddr)
1263 return -TARGET_EFAULT;
1264 memcpy(target_saddr, addr, len);
1265 target_saddr->sa_family = tswap16(addr->sa_family);
1266 unlock_user(target_saddr, target_addr, len);
1267
1268 return 0;
1269 }
1270
1271 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1272 struct target_msghdr *target_msgh)
1273 {
1274 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1275 abi_long msg_controllen;
1276 abi_ulong target_cmsg_addr;
1277 struct target_cmsghdr *target_cmsg;
1278 socklen_t space = 0;
1279
1280 msg_controllen = tswapal(target_msgh->msg_controllen);
1281 if (msg_controllen < sizeof (struct target_cmsghdr))
1282 goto the_end;
1283 target_cmsg_addr = tswapal(target_msgh->msg_control);
1284 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1285 if (!target_cmsg)
1286 return -TARGET_EFAULT;
1287
1288 while (cmsg && target_cmsg) {
1289 void *data = CMSG_DATA(cmsg);
1290 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1291
1292 int len = tswapal(target_cmsg->cmsg_len)
1293 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1294
1295 space += CMSG_SPACE(len);
1296 if (space > msgh->msg_controllen) {
1297 space -= CMSG_SPACE(len);
1298 gemu_log("Host cmsg overflow\n");
1299 break;
1300 }
1301
1302 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1303 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1304 cmsg->cmsg_len = CMSG_LEN(len);
1305
1306 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1307 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1308 memcpy(data, target_data, len);
1309 } else {
1310 int *fd = (int *)data;
1311 int *target_fd = (int *)target_data;
1312 int i, numfds = len / sizeof(int);
1313
1314 for (i = 0; i < numfds; i++)
1315 fd[i] = tswap32(target_fd[i]);
1316 }
1317
1318 cmsg = CMSG_NXTHDR(msgh, cmsg);
1319 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1320 }
1321 unlock_user(target_cmsg, target_cmsg_addr, 0);
1322 the_end:
1323 msgh->msg_controllen = space;
1324 return 0;
1325 }
1326
1327 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1328 struct msghdr *msgh)
1329 {
1330 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1331 abi_long msg_controllen;
1332 abi_ulong target_cmsg_addr;
1333 struct target_cmsghdr *target_cmsg;
1334 socklen_t space = 0;
1335
1336 msg_controllen = tswapal(target_msgh->msg_controllen);
1337 if (msg_controllen < sizeof (struct target_cmsghdr))
1338 goto the_end;
1339 target_cmsg_addr = tswapal(target_msgh->msg_control);
1340 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1341 if (!target_cmsg)
1342 return -TARGET_EFAULT;
1343
1344 while (cmsg && target_cmsg) {
1345 void *data = CMSG_DATA(cmsg);
1346 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1347
1348 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1349
1350 space += TARGET_CMSG_SPACE(len);
1351 if (space > msg_controllen) {
1352 space -= TARGET_CMSG_SPACE(len);
1353 gemu_log("Target cmsg overflow\n");
1354 break;
1355 }
1356
1357 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1358 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1359 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1360
1361 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1362 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1363 memcpy(target_data, data, len);
1364 } else {
1365 int *fd = (int *)data;
1366 int *target_fd = (int *)target_data;
1367 int i, numfds = len / sizeof(int);
1368
1369 for (i = 0; i < numfds; i++)
1370 target_fd[i] = tswap32(fd[i]);
1371 }
1372
1373 cmsg = CMSG_NXTHDR(msgh, cmsg);
1374 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1375 }
1376 unlock_user(target_cmsg, target_cmsg_addr, space);
1377 the_end:
1378 target_msgh->msg_controllen = tswapal(space);
1379 return 0;
1380 }
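/*
 * The length rewriting above matters because host and target control
 * message headers can differ in size. For example, passing two file
 * descriptors via SCM_RIGHTS gives a payload of 2 * sizeof(int) = 8
 * bytes; on a 64-bit host cmsg_len is CMSG_LEN(8) computed from a
 * 16-byte struct cmsghdr, while a 32-bit guest expects TARGET_CMSG_LEN(8)
 * based on its 12-byte header, and each descriptor value is additionally
 * converted with tswap32() for a cross-endian guest.
 */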
1381
1382 /* do_setsockopt() must return target values and target errnos. */
1383 static abi_long do_setsockopt(int sockfd, int level, int optname,
1384 abi_ulong optval_addr, socklen_t optlen)
1385 {
1386 abi_long ret;
1387 int val;
1388 struct ip_mreqn *ip_mreq;
1389 struct ip_mreq_source *ip_mreq_source;
1390
1391 switch(level) {
1392 case SOL_TCP:
1393 /* TCP options all take an 'int' value. */
1394 if (optlen < sizeof(uint32_t))
1395 return -TARGET_EINVAL;
1396
1397 if (get_user_u32(val, optval_addr))
1398 return -TARGET_EFAULT;
1399 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1400 break;
1401 case SOL_IP:
1402 switch(optname) {
1403 case IP_TOS:
1404 case IP_TTL:
1405 case IP_HDRINCL:
1406 case IP_ROUTER_ALERT:
1407 case IP_RECVOPTS:
1408 case IP_RETOPTS:
1409 case IP_PKTINFO:
1410 case IP_MTU_DISCOVER:
1411 case IP_RECVERR:
1412 case IP_RECVTOS:
1413 #ifdef IP_FREEBIND
1414 case IP_FREEBIND:
1415 #endif
1416 case IP_MULTICAST_TTL:
1417 case IP_MULTICAST_LOOP:
1418 val = 0;
1419 if (optlen >= sizeof(uint32_t)) {
1420 if (get_user_u32(val, optval_addr))
1421 return -TARGET_EFAULT;
1422 } else if (optlen >= 1) {
1423 if (get_user_u8(val, optval_addr))
1424 return -TARGET_EFAULT;
1425 }
1426 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1427 break;
1428 case IP_ADD_MEMBERSHIP:
1429 case IP_DROP_MEMBERSHIP:
1430 if (optlen < sizeof (struct target_ip_mreq) ||
1431 optlen > sizeof (struct target_ip_mreqn))
1432 return -TARGET_EINVAL;
1433
1434 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1435 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1436 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1437 break;
1438
1439 case IP_BLOCK_SOURCE:
1440 case IP_UNBLOCK_SOURCE:
1441 case IP_ADD_SOURCE_MEMBERSHIP:
1442 case IP_DROP_SOURCE_MEMBERSHIP:
1443 if (optlen != sizeof (struct target_ip_mreq_source))
1444 return -TARGET_EINVAL;
1445
1446 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1447 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1448 unlock_user (ip_mreq_source, optval_addr, 0);
1449 break;
1450
1451 default:
1452 goto unimplemented;
1453 }
1454 break;
1455 case TARGET_SOL_SOCKET:
1456 switch (optname) {
1457 /* Options with 'int' argument. */
1458 case TARGET_SO_DEBUG:
1459 optname = SO_DEBUG;
1460 break;
1461 case TARGET_SO_REUSEADDR:
1462 optname = SO_REUSEADDR;
1463 break;
1464 case TARGET_SO_TYPE:
1465 optname = SO_TYPE;
1466 break;
1467 case TARGET_SO_ERROR:
1468 optname = SO_ERROR;
1469 break;
1470 case TARGET_SO_DONTROUTE:
1471 optname = SO_DONTROUTE;
1472 break;
1473 case TARGET_SO_BROADCAST:
1474 optname = SO_BROADCAST;
1475 break;
1476 case TARGET_SO_SNDBUF:
1477 optname = SO_SNDBUF;
1478 break;
1479 case TARGET_SO_RCVBUF:
1480 optname = SO_RCVBUF;
1481 break;
1482 case TARGET_SO_KEEPALIVE:
1483 optname = SO_KEEPALIVE;
1484 break;
1485 case TARGET_SO_OOBINLINE:
1486 optname = SO_OOBINLINE;
1487 break;
1488 case TARGET_SO_NO_CHECK:
1489 optname = SO_NO_CHECK;
1490 break;
1491 case TARGET_SO_PRIORITY:
1492 optname = SO_PRIORITY;
1493 break;
1494 #ifdef SO_BSDCOMPAT
1495 case TARGET_SO_BSDCOMPAT:
1496 optname = SO_BSDCOMPAT;
1497 break;
1498 #endif
1499 case TARGET_SO_PASSCRED:
1500 optname = SO_PASSCRED;
1501 break;
1502 case TARGET_SO_TIMESTAMP:
1503 optname = SO_TIMESTAMP;
1504 break;
1505 case TARGET_SO_RCVLOWAT:
1506 optname = SO_RCVLOWAT;
1507 break;
1508 case TARGET_SO_RCVTIMEO:
1509 optname = SO_RCVTIMEO;
1510 break;
1511 case TARGET_SO_SNDTIMEO:
1512 optname = SO_SNDTIMEO;
1513 break;
1515 default:
1516 goto unimplemented;
1517 }
1518 if (optlen < sizeof(uint32_t))
1519 return -TARGET_EINVAL;
1520
1521 if (get_user_u32(val, optval_addr))
1522 return -TARGET_EFAULT;
1523 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1524 break;
1525 default:
1526 unimplemented:
1527 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1528 ret = -TARGET_ENOPROTOOPT;
1529 }
1530 return ret;
1531 }
1532
1533 /* do_getsockopt() must return target values and target errnos. */
1534 static abi_long do_getsockopt(int sockfd, int level, int optname,
1535 abi_ulong optval_addr, abi_ulong optlen)
1536 {
1537 abi_long ret;
1538 int len, val;
1539 socklen_t lv;
1540
1541 switch(level) {
1542 case TARGET_SOL_SOCKET:
1543 level = SOL_SOCKET;
1544 switch (optname) {
1545 /* These don't just return a single integer */
1546 case TARGET_SO_LINGER:
1547 case TARGET_SO_RCVTIMEO:
1548 case TARGET_SO_SNDTIMEO:
1549 case TARGET_SO_PEERNAME:
1550 goto unimplemented;
1551 case TARGET_SO_PEERCRED: {
1552 struct ucred cr;
1553 socklen_t crlen;
1554 struct target_ucred *tcr;
1555
1556 if (get_user_u32(len, optlen)) {
1557 return -TARGET_EFAULT;
1558 }
1559 if (len < 0) {
1560 return -TARGET_EINVAL;
1561 }
1562
1563 crlen = sizeof(cr);
1564 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1565 &cr, &crlen));
1566 if (ret < 0) {
1567 return ret;
1568 }
1569 if (len > crlen) {
1570 len = crlen;
1571 }
1572 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1573 return -TARGET_EFAULT;
1574 }
1575 __put_user(cr.pid, &tcr->pid);
1576 __put_user(cr.uid, &tcr->uid);
1577 __put_user(cr.gid, &tcr->gid);
1578 unlock_user_struct(tcr, optval_addr, 1);
1579 if (put_user_u32(len, optlen)) {
1580 return -TARGET_EFAULT;
1581 }
1582 break;
1583 }
1584 /* Options with 'int' argument. */
1585 case TARGET_SO_DEBUG:
1586 optname = SO_DEBUG;
1587 goto int_case;
1588 case TARGET_SO_REUSEADDR:
1589 optname = SO_REUSEADDR;
1590 goto int_case;
1591 case TARGET_SO_TYPE:
1592 optname = SO_TYPE;
1593 goto int_case;
1594 case TARGET_SO_ERROR:
1595 optname = SO_ERROR;
1596 goto int_case;
1597 case TARGET_SO_DONTROUTE:
1598 optname = SO_DONTROUTE;
1599 goto int_case;
1600 case TARGET_SO_BROADCAST:
1601 optname = SO_BROADCAST;
1602 goto int_case;
1603 case TARGET_SO_SNDBUF:
1604 optname = SO_SNDBUF;
1605 goto int_case;
1606 case TARGET_SO_RCVBUF:
1607 optname = SO_RCVBUF;
1608 goto int_case;
1609 case TARGET_SO_KEEPALIVE:
1610 optname = SO_KEEPALIVE;
1611 goto int_case;
1612 case TARGET_SO_OOBINLINE:
1613 optname = SO_OOBINLINE;
1614 goto int_case;
1615 case TARGET_SO_NO_CHECK:
1616 optname = SO_NO_CHECK;
1617 goto int_case;
1618 case TARGET_SO_PRIORITY:
1619 optname = SO_PRIORITY;
1620 goto int_case;
1621 #ifdef SO_BSDCOMPAT
1622 case TARGET_SO_BSDCOMPAT:
1623 optname = SO_BSDCOMPAT;
1624 goto int_case;
1625 #endif
1626 case TARGET_SO_PASSCRED:
1627 optname = SO_PASSCRED;
1628 goto int_case;
1629 case TARGET_SO_TIMESTAMP:
1630 optname = SO_TIMESTAMP;
1631 goto int_case;
1632 case TARGET_SO_RCVLOWAT:
1633 optname = SO_RCVLOWAT;
1634 goto int_case;
1635 default:
1636 goto int_case;
1637 }
1638 break;
1639 case SOL_TCP:
1640 /* TCP options all take an 'int' value. */
1641 int_case:
1642 if (get_user_u32(len, optlen))
1643 return -TARGET_EFAULT;
1644 if (len < 0)
1645 return -TARGET_EINVAL;
1646 lv = sizeof(lv);
1647 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1648 if (ret < 0)
1649 return ret;
1650 if (len > lv)
1651 len = lv;
1652 if (len == 4) {
1653 if (put_user_u32(val, optval_addr))
1654 return -TARGET_EFAULT;
1655 } else {
1656 if (put_user_u8(val, optval_addr))
1657 return -TARGET_EFAULT;
1658 }
1659 if (put_user_u32(len, optlen))
1660 return -TARGET_EFAULT;
1661 break;
1662 case SOL_IP:
1663 switch(optname) {
1664 case IP_TOS:
1665 case IP_TTL:
1666 case IP_HDRINCL:
1667 case IP_ROUTER_ALERT:
1668 case IP_RECVOPTS:
1669 case IP_RETOPTS:
1670 case IP_PKTINFO:
1671 case IP_MTU_DISCOVER:
1672 case IP_RECVERR:
1673 case IP_RECVTOS:
1674 #ifdef IP_FREEBIND
1675 case IP_FREEBIND:
1676 #endif
1677 case IP_MULTICAST_TTL:
1678 case IP_MULTICAST_LOOP:
1679 if (get_user_u32(len, optlen))
1680 return -TARGET_EFAULT;
1681 if (len < 0)
1682 return -TARGET_EINVAL;
1683 lv = sizeof(lv);
1684 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1685 if (ret < 0)
1686 return ret;
1687 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1688 len = 1;
1689 if (put_user_u32(len, optlen)
1690 || put_user_u8(val, optval_addr))
1691 return -TARGET_EFAULT;
1692 } else {
1693 if (len > sizeof(int))
1694 len = sizeof(int);
1695 if (put_user_u32(len, optlen)
1696 || put_user_u32(val, optval_addr))
1697 return -TARGET_EFAULT;
1698 }
1699 break;
1700 default:
1701 ret = -TARGET_ENOPROTOOPT;
1702 break;
1703 }
1704 break;
1705 default:
1706 unimplemented:
1707 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1708 level, optname);
1709 ret = -TARGET_EOPNOTSUPP;
1710 break;
1711 }
1712 return ret;
1713 }
1714
1715 /* FIXME
1716 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1717 * other lock functions have a return code of 0 for failure.
1718 */
1719 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1720 int count, int copy)
1721 {
1722 struct target_iovec *target_vec;
1723 abi_ulong base;
1724 int i;
1725
1726 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1727 if (!target_vec)
1728 return -TARGET_EFAULT;
1729 for(i = 0;i < count; i++) {
1730 base = tswapal(target_vec[i].iov_base);
1731 vec[i].iov_len = tswapal(target_vec[i].iov_len);
1732 if (vec[i].iov_len != 0) {
1733 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1734 /* Don't check lock_user return value. We must call writev even
1735                if an element has an invalid base address. */
1736 } else {
1737 /* zero length pointer is ignored */
1738 vec[i].iov_base = NULL;
1739 }
1740 }
1741 unlock_user (target_vec, target_addr, 0);
1742 return 0;
1743 }
1744
1745 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1746 int count, int copy)
1747 {
1748 struct target_iovec *target_vec;
1749 abi_ulong base;
1750 int i;
1751
1752 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1753 if (!target_vec)
1754 return -TARGET_EFAULT;
1755 for(i = 0;i < count; i++) {
1756 if (target_vec[i].iov_base) {
1757 base = tswapal(target_vec[i].iov_base);
1758 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1759 }
1760 }
1761 unlock_user (target_vec, target_addr, 0);
1762
1763 return 0;
1764 }
1765
1766 /* do_socket() must return target values and target errnos. */
1767 static abi_long do_socket(int domain, int type, int protocol)
1768 {
1769 #if defined(TARGET_MIPS)
1770 switch(type) {
1771 case TARGET_SOCK_DGRAM:
1772 type = SOCK_DGRAM;
1773 break;
1774 case TARGET_SOCK_STREAM:
1775 type = SOCK_STREAM;
1776 break;
1777 case TARGET_SOCK_RAW:
1778 type = SOCK_RAW;
1779 break;
1780 case TARGET_SOCK_RDM:
1781 type = SOCK_RDM;
1782 break;
1783 case TARGET_SOCK_SEQPACKET:
1784 type = SOCK_SEQPACKET;
1785 break;
1786 case TARGET_SOCK_PACKET:
1787 type = SOCK_PACKET;
1788 break;
1789 }
1790 #endif
1791 if (domain == PF_NETLINK)
1792         return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1793 return get_errno(socket(domain, type, protocol));
1794 }
1795
1796 /* do_bind() must return target values and target errnos. */
1797 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1798 socklen_t addrlen)
1799 {
1800 void *addr;
1801 abi_long ret;
1802
1803 if ((int)addrlen < 0) {
1804 return -TARGET_EINVAL;
1805 }
1806
1807 addr = alloca(addrlen+1);
1808
1809 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1810 if (ret)
1811 return ret;
1812
1813 return get_errno(bind(sockfd, addr, addrlen));
1814 }
1815
1816 /* do_connect() must return target values and target errnos. */
1817 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1818 socklen_t addrlen)
1819 {
1820 void *addr;
1821 abi_long ret;
1822
1823 if ((int)addrlen < 0) {
1824 return -TARGET_EINVAL;
1825 }
1826
1827 addr = alloca(addrlen);
1828
1829 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1830 if (ret)
1831 return ret;
1832
1833 return get_errno(connect(sockfd, addr, addrlen));
1834 }
1835
1836 /* do_sendrecvmsg() must return target values and target errnos. */
1837 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1838 int flags, int send)
1839 {
1840 abi_long ret, len;
1841 struct target_msghdr *msgp;
1842 struct msghdr msg;
1843 int count;
1844 struct iovec *vec;
1845 abi_ulong target_vec;
1846
1847 /* FIXME */
1848 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1849 msgp,
1850 target_msg,
1851 send ? 1 : 0))
1852 return -TARGET_EFAULT;
1853 if (msgp->msg_name) {
1854 msg.msg_namelen = tswap32(msgp->msg_namelen);
1855 msg.msg_name = alloca(msg.msg_namelen);
1856 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1857 msg.msg_namelen);
1858 if (ret) {
1859 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1860 return ret;
1861 }
1862 } else {
1863 msg.msg_name = NULL;
1864 msg.msg_namelen = 0;
1865 }
1866 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1867 msg.msg_control = alloca(msg.msg_controllen);
1868 msg.msg_flags = tswap32(msgp->msg_flags);
1869
1870 count = tswapal(msgp->msg_iovlen);
1871 vec = alloca(count * sizeof(struct iovec));
1872 target_vec = tswapal(msgp->msg_iov);
1873 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1874 msg.msg_iovlen = count;
1875 msg.msg_iov = vec;
1876
1877 if (send) {
1878 ret = target_to_host_cmsg(&msg, msgp);
1879 if (ret == 0)
1880 ret = get_errno(sendmsg(fd, &msg, flags));
1881 } else {
1882 ret = get_errno(recvmsg(fd, &msg, flags));
1883 if (!is_error(ret)) {
1884 len = ret;
1885 ret = host_to_target_cmsg(msgp, &msg);
1886 if (!is_error(ret)) {
1887 msgp->msg_namelen = tswap32(msg.msg_namelen);
1888 if (msg.msg_name != NULL) {
1889 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1890 msg.msg_name, msg.msg_namelen);
1891 if (ret) {
1892 goto out;
1893 }
1894 }
1895
1896 ret = len;
1897 }
1898 }
1899 }
1900
1901 out:
1902 unlock_iovec(vec, target_vec, count, !send);
1903 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1904 return ret;
1905 }
1906
1907 /* do_accept() Must return target values and target errnos. */
1908 static abi_long do_accept(int fd, abi_ulong target_addr,
1909 abi_ulong target_addrlen_addr)
1910 {
1911 socklen_t addrlen;
1912 void *addr;
1913 abi_long ret;
1914
1915 if (target_addr == 0)
1916 return get_errno(accept(fd, NULL, NULL));
1917
1918 /* Linux returns EINVAL if the addrlen pointer is invalid */
1919 if (get_user_u32(addrlen, target_addrlen_addr))
1920 return -TARGET_EINVAL;
1921
1922 if ((int)addrlen < 0) {
1923 return -TARGET_EINVAL;
1924 }
1925
1926 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1927 return -TARGET_EINVAL;
1928
1929 addr = alloca(addrlen);
1930
1931 ret = get_errno(accept(fd, addr, &addrlen));
1932 if (!is_error(ret)) {
1933 host_to_target_sockaddr(target_addr, addr, addrlen);
1934 if (put_user_u32(addrlen, target_addrlen_addr))
1935 ret = -TARGET_EFAULT;
1936 }
1937 return ret;
1938 }
1939
1940 /* do_getpeername() Must return target values and target errnos. */
1941 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1942 abi_ulong target_addrlen_addr)
1943 {
1944 socklen_t addrlen;
1945 void *addr;
1946 abi_long ret;
1947
1948 if (get_user_u32(addrlen, target_addrlen_addr))
1949 return -TARGET_EFAULT;
1950
1951 if ((int)addrlen < 0) {
1952 return -TARGET_EINVAL;
1953 }
1954
1955 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1956 return -TARGET_EFAULT;
1957
1958 addr = alloca(addrlen);
1959
1960 ret = get_errno(getpeername(fd, addr, &addrlen));
1961 if (!is_error(ret)) {
1962 host_to_target_sockaddr(target_addr, addr, addrlen);
1963 if (put_user_u32(addrlen, target_addrlen_addr))
1964 ret = -TARGET_EFAULT;
1965 }
1966 return ret;
1967 }
1968
1969 /* do_getsockname() Must return target values and target errnos. */
1970 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1971 abi_ulong target_addrlen_addr)
1972 {
1973 socklen_t addrlen;
1974 void *addr;
1975 abi_long ret;
1976
1977 if (get_user_u32(addrlen, target_addrlen_addr))
1978 return -TARGET_EFAULT;
1979
1980 if ((int)addrlen < 0) {
1981 return -TARGET_EINVAL;
1982 }
1983
1984 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1985 return -TARGET_EFAULT;
1986
1987 addr = alloca(addrlen);
1988
1989 ret = get_errno(getsockname(fd, addr, &addrlen));
1990 if (!is_error(ret)) {
1991 host_to_target_sockaddr(target_addr, addr, addrlen);
1992 if (put_user_u32(addrlen, target_addrlen_addr))
1993 ret = -TARGET_EFAULT;
1994 }
1995 return ret;
1996 }
1997
1998 /* do_socketpair() Must return target values and target errnos. */
1999 static abi_long do_socketpair(int domain, int type, int protocol,
2000 abi_ulong target_tab_addr)
2001 {
2002 int tab[2];
2003 abi_long ret;
2004
2005 ret = get_errno(socketpair(domain, type, protocol, tab));
2006 if (!is_error(ret)) {
2007 if (put_user_s32(tab[0], target_tab_addr)
2008 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2009 ret = -TARGET_EFAULT;
2010 }
2011 return ret;
2012 }
2013
2014 /* do_sendto() Must return target values and target errnos. */
2015 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2016 abi_ulong target_addr, socklen_t addrlen)
2017 {
2018 void *addr;
2019 void *host_msg;
2020 abi_long ret;
2021
2022 if ((int)addrlen < 0) {
2023 return -TARGET_EINVAL;
2024 }
2025
2026 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2027 if (!host_msg)
2028 return -TARGET_EFAULT;
2029 if (target_addr) {
2030 addr = alloca(addrlen);
2031 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2032 if (ret) {
2033 unlock_user(host_msg, msg, 0);
2034 return ret;
2035 }
2036 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2037 } else {
2038 ret = get_errno(send(fd, host_msg, len, flags));
2039 }
2040 unlock_user(host_msg, msg, 0);
2041 return ret;
2042 }
2043
2044 /* do_recvfrom() Must return target values and target errnos. */
2045 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2046 abi_ulong target_addr,
2047 abi_ulong target_addrlen)
2048 {
2049 socklen_t addrlen;
2050 void *addr;
2051 void *host_msg;
2052 abi_long ret;
2053
2054 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2055 if (!host_msg)
2056 return -TARGET_EFAULT;
2057 if (target_addr) {
2058 if (get_user_u32(addrlen, target_addrlen)) {
2059 ret = -TARGET_EFAULT;
2060 goto fail;
2061 }
2062 if ((int)addrlen < 0) {
2063 ret = -TARGET_EINVAL;
2064 goto fail;
2065 }
2066 addr = alloca(addrlen);
2067 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2068 } else {
2069 addr = NULL; /* To keep compiler quiet. */
2070 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2071 }
2072 if (!is_error(ret)) {
2073 if (target_addr) {
2074 host_to_target_sockaddr(target_addr, addr, addrlen);
2075 if (put_user_u32(addrlen, target_addrlen)) {
2076 ret = -TARGET_EFAULT;
2077 goto fail;
2078 }
2079 }
2080 unlock_user(host_msg, msg, len);
2081 } else {
2082 fail:
2083 unlock_user(host_msg, msg, 0);
2084 }
2085 return ret;
2086 }
2087
2088 #ifdef TARGET_NR_socketcall
2089 /* do_socketcall() Must return target values and target errnos. */
2090 static abi_long do_socketcall(int num, abi_ulong vptr)
2091 {
2092 abi_long ret;
2093 const int n = sizeof(abi_ulong);
2094
2095 switch(num) {
2096 case SOCKOP_socket:
2097 {
2098 abi_ulong domain, type, protocol;
2099
2100 if (get_user_ual(domain, vptr)
2101 || get_user_ual(type, vptr + n)
2102 || get_user_ual(protocol, vptr + 2 * n))
2103 return -TARGET_EFAULT;
2104
2105 ret = do_socket(domain, type, protocol);
2106 }
2107 break;
2108 case SOCKOP_bind:
2109 {
2110 abi_ulong sockfd;
2111 abi_ulong target_addr;
2112 socklen_t addrlen;
2113
2114 if (get_user_ual(sockfd, vptr)
2115 || get_user_ual(target_addr, vptr + n)
2116 || get_user_ual(addrlen, vptr + 2 * n))
2117 return -TARGET_EFAULT;
2118
2119 ret = do_bind(sockfd, target_addr, addrlen);
2120 }
2121 break;
2122 case SOCKOP_connect:
2123 {
2124 abi_ulong sockfd;
2125 abi_ulong target_addr;
2126 socklen_t addrlen;
2127
2128 if (get_user_ual(sockfd, vptr)
2129 || get_user_ual(target_addr, vptr + n)
2130 || get_user_ual(addrlen, vptr + 2 * n))
2131 return -TARGET_EFAULT;
2132
2133 ret = do_connect(sockfd, target_addr, addrlen);
2134 }
2135 break;
2136 case SOCKOP_listen:
2137 {
2138 abi_ulong sockfd, backlog;
2139
2140 if (get_user_ual(sockfd, vptr)
2141 || get_user_ual(backlog, vptr + n))
2142 return -TARGET_EFAULT;
2143
2144 ret = get_errno(listen(sockfd, backlog));
2145 }
2146 break;
2147 case SOCKOP_accept:
2148 {
2149 abi_ulong sockfd;
2150 abi_ulong target_addr, target_addrlen;
2151
2152 if (get_user_ual(sockfd, vptr)
2153 || get_user_ual(target_addr, vptr + n)
2154 || get_user_ual(target_addrlen, vptr + 2 * n))
2155 return -TARGET_EFAULT;
2156
2157 ret = do_accept(sockfd, target_addr, target_addrlen);
2158 }
2159 break;
2160 case SOCKOP_getsockname:
2161 {
2162 abi_ulong sockfd;
2163 abi_ulong target_addr, target_addrlen;
2164
2165 if (get_user_ual(sockfd, vptr)
2166 || get_user_ual(target_addr, vptr + n)
2167 || get_user_ual(target_addrlen, vptr + 2 * n))
2168 return -TARGET_EFAULT;
2169
2170 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2171 }
2172 break;
2173 case SOCKOP_getpeername:
2174 {
2175 abi_ulong sockfd;
2176 abi_ulong target_addr, target_addrlen;
2177
2178 if (get_user_ual(sockfd, vptr)
2179 || get_user_ual(target_addr, vptr + n)
2180 || get_user_ual(target_addrlen, vptr + 2 * n))
2181 return -TARGET_EFAULT;
2182
2183 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2184 }
2185 break;
2186 case SOCKOP_socketpair:
2187 {
2188 abi_ulong domain, type, protocol;
2189 abi_ulong tab;
2190
2191 if (get_user_ual(domain, vptr)
2192 || get_user_ual(type, vptr + n)
2193 || get_user_ual(protocol, vptr + 2 * n)
2194 || get_user_ual(tab, vptr + 3 * n))
2195 return -TARGET_EFAULT;
2196
2197 ret = do_socketpair(domain, type, protocol, tab);
2198 }
2199 break;
2200 case SOCKOP_send:
2201 {
2202 abi_ulong sockfd;
2203 abi_ulong msg;
2204 size_t len;
2205 abi_ulong flags;
2206
2207 if (get_user_ual(sockfd, vptr)
2208 || get_user_ual(msg, vptr + n)
2209 || get_user_ual(len, vptr + 2 * n)
2210 || get_user_ual(flags, vptr + 3 * n))
2211 return -TARGET_EFAULT;
2212
2213 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2214 }
2215 break;
2216 case SOCKOP_recv:
2217 {
2218 abi_ulong sockfd;
2219 abi_ulong msg;
2220 size_t len;
2221 abi_ulong flags;
2222
2223 if (get_user_ual(sockfd, vptr)
2224 || get_user_ual(msg, vptr + n)
2225 || get_user_ual(len, vptr + 2 * n)
2226 || get_user_ual(flags, vptr + 3 * n))
2227 return -TARGET_EFAULT;
2228
2229 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2230 }
2231 break;
2232 case SOCKOP_sendto:
2233 {
2234 abi_ulong sockfd;
2235 abi_ulong msg;
2236 size_t len;
2237 abi_ulong flags;
2238 abi_ulong addr;
2239 socklen_t addrlen;
2240
2241 if (get_user_ual(sockfd, vptr)
2242 || get_user_ual(msg, vptr + n)
2243 || get_user_ual(len, vptr + 2 * n)
2244 || get_user_ual(flags, vptr + 3 * n)
2245 || get_user_ual(addr, vptr + 4 * n)
2246 || get_user_ual(addrlen, vptr + 5 * n))
2247 return -TARGET_EFAULT;
2248
2249 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2250 }
2251 break;
2252 case SOCKOP_recvfrom:
2253 {
2254 abi_ulong sockfd;
2255 abi_ulong msg;
2256 size_t len;
2257 abi_ulong flags;
2258 abi_ulong addr;
2259 socklen_t addrlen;
2260
2261 if (get_user_ual(sockfd, vptr)
2262 || get_user_ual(msg, vptr + n)
2263 || get_user_ual(len, vptr + 2 * n)
2264 || get_user_ual(flags, vptr + 3 * n)
2265 || get_user_ual(addr, vptr + 4 * n)
2266 || get_user_ual(addrlen, vptr + 5 * n))
2267 return -TARGET_EFAULT;
2268
2269 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2270 }
2271 break;
2272 case SOCKOP_shutdown:
2273 {
2274 abi_ulong sockfd, how;
2275
2276 if (get_user_ual(sockfd, vptr)
2277 || get_user_ual(how, vptr + n))
2278 return -TARGET_EFAULT;
2279
2280 ret = get_errno(shutdown(sockfd, how));
2281 }
2282 break;
2283 case SOCKOP_sendmsg:
2284 case SOCKOP_recvmsg:
2285 {
2286 abi_ulong fd;
2287 abi_ulong target_msg;
2288 abi_ulong flags;
2289
2290 if (get_user_ual(fd, vptr)
2291 || get_user_ual(target_msg, vptr + n)
2292 || get_user_ual(flags, vptr + 2 * n))
2293 return -TARGET_EFAULT;
2294
2295 ret = do_sendrecvmsg(fd, target_msg, flags,
2296 (num == SOCKOP_sendmsg));
2297 }
2298 break;
2299 case SOCKOP_setsockopt:
2300 {
2301 abi_ulong sockfd;
2302 abi_ulong level;
2303 abi_ulong optname;
2304 abi_ulong optval;
2305 socklen_t optlen;
2306
2307 if (get_user_ual(sockfd, vptr)
2308 || get_user_ual(level, vptr + n)
2309 || get_user_ual(optname, vptr + 2 * n)
2310 || get_user_ual(optval, vptr + 3 * n)
2311 || get_user_ual(optlen, vptr + 4 * n))
2312 return -TARGET_EFAULT;
2313
2314 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2315 }
2316 break;
2317 case SOCKOP_getsockopt:
2318 {
2319 abi_ulong sockfd;
2320 abi_ulong level;
2321 abi_ulong optname;
2322 abi_ulong optval;
2323 socklen_t optlen;
2324
2325 if (get_user_ual(sockfd, vptr)
2326 || get_user_ual(level, vptr + n)
2327 || get_user_ual(optname, vptr + 2 * n)
2328 || get_user_ual(optval, vptr + 3 * n)
2329 || get_user_ual(optlen, vptr + 4 * n))
2330 return -TARGET_EFAULT;
2331
2332 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2333 }
2334 break;
2335 default:
2336 gemu_log("Unsupported socketcall: %d\n", num);
2337 ret = -TARGET_ENOSYS;
2338 break;
2339 }
2340 return ret;
2341 }
2342 #endif
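/* Illustrative note (an assumption about guest-side usage, not code from
 * this file): a guest libc that multiplexes through socketcall(2) stores
 * the arguments as an array of abi_ulong and passes its address in vptr:
 *
 *     abi_ulong args[3] = { sockfd, target_sockaddr, addrlen };
 *     socketcall(SOCKOP_connect, args);
 *
 * do_socketcall() above then reads each slot with
 * get_user_ual(x, vptr + i * n), where n == sizeof(abi_ulong) in the
 * guest ABI.
 */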
2343
2344 #define N_SHM_REGIONS 32
2345
2346 static struct shm_region {
2347 abi_ulong start;
2348 abi_ulong size;
2349 } shm_regions[N_SHM_REGIONS];
2350
2351 struct target_ipc_perm
2352 {
2353 abi_long __key;
2354 abi_ulong uid;
2355 abi_ulong gid;
2356 abi_ulong cuid;
2357 abi_ulong cgid;
2358 unsigned short int mode;
2359 unsigned short int __pad1;
2360 unsigned short int __seq;
2361 unsigned short int __pad2;
2362 abi_ulong __unused1;
2363 abi_ulong __unused2;
2364 };
2365
2366 struct target_semid_ds
2367 {
2368 struct target_ipc_perm sem_perm;
2369 abi_ulong sem_otime;
2370 abi_ulong __unused1;
2371 abi_ulong sem_ctime;
2372 abi_ulong __unused2;
2373 abi_ulong sem_nsems;
2374 abi_ulong __unused3;
2375 abi_ulong __unused4;
2376 };
2377
2378 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2379 abi_ulong target_addr)
2380 {
2381 struct target_ipc_perm *target_ip;
2382 struct target_semid_ds *target_sd;
2383
2384 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2385 return -TARGET_EFAULT;
2386 target_ip = &(target_sd->sem_perm);
2387 host_ip->__key = tswapal(target_ip->__key);
2388 host_ip->uid = tswapal(target_ip->uid);
2389 host_ip->gid = tswapal(target_ip->gid);
2390 host_ip->cuid = tswapal(target_ip->cuid);
2391 host_ip->cgid = tswapal(target_ip->cgid);
2392 host_ip->mode = tswap16(target_ip->mode);
2393 unlock_user_struct(target_sd, target_addr, 0);
2394 return 0;
2395 }
2396
2397 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2398 struct ipc_perm *host_ip)
2399 {
2400 struct target_ipc_perm *target_ip;
2401 struct target_semid_ds *target_sd;
2402
2403 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2404 return -TARGET_EFAULT;
2405 target_ip = &(target_sd->sem_perm);
2406 target_ip->__key = tswapal(host_ip->__key);
2407 target_ip->uid = tswapal(host_ip->uid);
2408 target_ip->gid = tswapal(host_ip->gid);
2409 target_ip->cuid = tswapal(host_ip->cuid);
2410 target_ip->cgid = tswapal(host_ip->cgid);
2411 target_ip->mode = tswap16(host_ip->mode);
2412 unlock_user_struct(target_sd, target_addr, 1);
2413 return 0;
2414 }
2415
2416 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2417 abi_ulong target_addr)
2418 {
2419 struct target_semid_ds *target_sd;
2420
2421 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2422 return -TARGET_EFAULT;
2423 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2424 return -TARGET_EFAULT;
2425 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2426 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2427 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2428 unlock_user_struct(target_sd, target_addr, 0);
2429 return 0;
2430 }
2431
2432 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2433 struct semid_ds *host_sd)
2434 {
2435 struct target_semid_ds *target_sd;
2436
2437 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2438 return -TARGET_EFAULT;
2439 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2440 return -TARGET_EFAULT;
2441 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2442 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2443 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2444 unlock_user_struct(target_sd, target_addr, 1);
2445 return 0;
2446 }
2447
2448 struct target_seminfo {
2449 int semmap;
2450 int semmni;
2451 int semmns;
2452 int semmnu;
2453 int semmsl;
2454 int semopm;
2455 int semume;
2456 int semusz;
2457 int semvmx;
2458 int semaem;
2459 };
2460
2461 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2462 struct seminfo *host_seminfo)
2463 {
2464 struct target_seminfo *target_seminfo;
2465 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2466 return -TARGET_EFAULT;
2467 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2468 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2469 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2470 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2471 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2472 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2473 __put_user(host_seminfo->semume, &target_seminfo->semume);
2474 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2475 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2476 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2477 unlock_user_struct(target_seminfo, target_addr, 1);
2478 return 0;
2479 }
2480
2481 union semun {
2482 int val;
2483 struct semid_ds *buf;
2484 unsigned short *array;
2485 struct seminfo *__buf;
2486 };
2487
2488 union target_semun {
2489 int val;
2490 abi_ulong buf;
2491 abi_ulong array;
2492 abi_ulong __buf;
2493 };
2494
2495 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2496 abi_ulong target_addr)
2497 {
2498 int nsems;
2499 unsigned short *array;
2500 union semun semun;
2501 struct semid_ds semid_ds;
2502 int i, ret;
2503
2504 semun.buf = &semid_ds;
2505
2506 ret = semctl(semid, 0, IPC_STAT, semun);
2507 if (ret == -1)
2508 return get_errno(ret);
2509
2510 nsems = semid_ds.sem_nsems;
2511
2512 *host_array = malloc(nsems*sizeof(unsigned short));
2513 array = lock_user(VERIFY_READ, target_addr,
2514 nsems*sizeof(unsigned short), 1);
2515 if (!array)
2516 return -TARGET_EFAULT;
2517
2518 for(i=0; i<nsems; i++) {
2519 __get_user((*host_array)[i], &array[i]);
2520 }
2521 unlock_user(array, target_addr, 0);
2522
2523 return 0;
2524 }
2525
2526 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2527 unsigned short **host_array)
2528 {
2529 int nsems;
2530 unsigned short *array;
2531 union semun semun;
2532 struct semid_ds semid_ds;
2533 int i, ret;
2534
2535 semun.buf = &semid_ds;
2536
2537 ret = semctl(semid, 0, IPC_STAT, semun);
2538 if (ret == -1)
2539 return get_errno(ret);
2540
2541 nsems = semid_ds.sem_nsems;
2542
2543 array = lock_user(VERIFY_WRITE, target_addr,
2544 nsems*sizeof(unsigned short), 0);
2545 if (!array)
2546 return -TARGET_EFAULT;
2547
2548 for(i=0; i<nsems; i++) {
2549 __put_user((*host_array)[i], &array[i]);
2550 }
2551 free(*host_array);
2552 unlock_user(array, target_addr, 1);
2553
2554 return 0;
2555 }
2556
2557 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2558 union target_semun target_su)
2559 {
2560 union semun arg;
2561 struct semid_ds dsarg;
2562 unsigned short *array = NULL;
2563 struct seminfo seminfo;
2564 abi_long ret = -TARGET_EINVAL;
2565 abi_long err;
2566 cmd &= 0xff;
2567
2568 switch( cmd ) {
2569 case GETVAL:
2570 case SETVAL:
2571 arg.val = tswap32(target_su.val);
2572 ret = get_errno(semctl(semid, semnum, cmd, arg));
2573 target_su.val = tswap32(arg.val);
2574 break;
2575 case GETALL:
2576 case SETALL:
2577 err = target_to_host_semarray(semid, &array, target_su.array);
2578 if (err)
2579 return err;
2580 arg.array = array;
2581 ret = get_errno(semctl(semid, semnum, cmd, arg));
2582 err = host_to_target_semarray(semid, target_su.array, &array);
2583 if (err)
2584 return err;
2585 break;
2586 case IPC_STAT:
2587 case IPC_SET:
2588 case SEM_STAT:
2589 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2590 if (err)
2591 return err;
2592 arg.buf = &dsarg;
2593 ret = get_errno(semctl(semid, semnum, cmd, arg));
2594 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2595 if (err)
2596 return err;
2597 break;
2598 case IPC_INFO:
2599 case SEM_INFO:
2600 arg.__buf = &seminfo;
2601 ret = get_errno(semctl(semid, semnum, cmd, arg));
2602 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2603 if (err)
2604 return err;
2605 break;
2606 case IPC_RMID:
2607 case GETPID:
2608 case GETNCNT:
2609 case GETZCNT:
2610 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2611 break;
2612 }
2613
2614 return ret;
2615 }
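/* Usage sketch (illustrative; "guest_buf" is a hypothetical guest
 * address): the target_semun argument is passed by value and, for
 * GETALL/SETALL, carries a guest pointer to an array of unsigned short
 * that the *_semarray() helpers above convert:
 *
 *     union target_semun su;
 *     su.array = guest_buf;                    <- abi_ulong, not a host pointer
 *     ret = do_semctl(semid, 0, GETALL, su);   <- fills the guest array
 *
 * do_ipc() below builds the same union directly from its "ptr" argument.
 */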
2616
2617 struct target_sembuf {
2618 unsigned short sem_num;
2619 short sem_op;
2620 short sem_flg;
2621 };
2622
2623 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2624 abi_ulong target_addr,
2625 unsigned nsops)
2626 {
2627 struct target_sembuf *target_sembuf;
2628 int i;
2629
2630 target_sembuf = lock_user(VERIFY_READ, target_addr,
2631 nsops*sizeof(struct target_sembuf), 1);
2632 if (!target_sembuf)
2633 return -TARGET_EFAULT;
2634
2635 for(i=0; i<nsops; i++) {
2636 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2637 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2638 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2639 }
2640
2641 unlock_user(target_sembuf, target_addr, 0);
2642
2643 return 0;
2644 }
2645
2646 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2647 {
2648 struct sembuf sops[nsops];
2649
2650 if (target_to_host_sembuf(sops, ptr, nsops))
2651 return -TARGET_EFAULT;
2652
2653 return semop(semid, sops, nsops);
2654 }
2655
2656 struct target_msqid_ds
2657 {
2658 struct target_ipc_perm msg_perm;
2659 abi_ulong msg_stime;
2660 #if TARGET_ABI_BITS == 32
2661 abi_ulong __unused1;
2662 #endif
2663 abi_ulong msg_rtime;
2664 #if TARGET_ABI_BITS == 32
2665 abi_ulong __unused2;
2666 #endif
2667 abi_ulong msg_ctime;
2668 #if TARGET_ABI_BITS == 32
2669 abi_ulong __unused3;
2670 #endif
2671 abi_ulong __msg_cbytes;
2672 abi_ulong msg_qnum;
2673 abi_ulong msg_qbytes;
2674 abi_ulong msg_lspid;
2675 abi_ulong msg_lrpid;
2676 abi_ulong __unused4;
2677 abi_ulong __unused5;
2678 };
2679
2680 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2681 abi_ulong target_addr)
2682 {
2683 struct target_msqid_ds *target_md;
2684
2685 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2686 return -TARGET_EFAULT;
2687 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2688 return -TARGET_EFAULT;
2689 host_md->msg_stime = tswapal(target_md->msg_stime);
2690 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2691 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2692 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2693 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2694 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2695 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2696 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2697 unlock_user_struct(target_md, target_addr, 0);
2698 return 0;
2699 }
2700
2701 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2702 struct msqid_ds *host_md)
2703 {
2704 struct target_msqid_ds *target_md;
2705
2706 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2707 return -TARGET_EFAULT;
2708 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2709 return -TARGET_EFAULT;
2710 target_md->msg_stime = tswapal(host_md->msg_stime);
2711 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2712 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2713 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2714 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2715 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2716 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2717 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2718 unlock_user_struct(target_md, target_addr, 1);
2719 return 0;
2720 }
2721
2722 struct target_msginfo {
2723 int msgpool;
2724 int msgmap;
2725 int msgmax;
2726 int msgmnb;
2727 int msgmni;
2728 int msgssz;
2729 int msgtql;
2730 unsigned short int msgseg;
2731 };
2732
2733 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2734 struct msginfo *host_msginfo)
2735 {
2736 struct target_msginfo *target_msginfo;
2737 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2738 return -TARGET_EFAULT;
2739 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2740 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2741 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2742 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2743 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2744 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2745 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2746 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2747 unlock_user_struct(target_msginfo, target_addr, 1);
2748 return 0;
2749 }
2750
2751 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2752 {
2753 struct msqid_ds dsarg;
2754 struct msginfo msginfo;
2755 abi_long ret = -TARGET_EINVAL;
2756
2757 cmd &= 0xff;
2758
2759 switch (cmd) {
2760 case IPC_STAT:
2761 case IPC_SET:
2762 case MSG_STAT:
2763 if (target_to_host_msqid_ds(&dsarg,ptr))
2764 return -TARGET_EFAULT;
2765 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2766 if (host_to_target_msqid_ds(ptr,&dsarg))
2767 return -TARGET_EFAULT;
2768 break;
2769 case IPC_RMID:
2770 ret = get_errno(msgctl(msgid, cmd, NULL));
2771 break;
2772 case IPC_INFO:
2773 case MSG_INFO:
2774 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2775 if (host_to_target_msginfo(ptr, &msginfo))
2776 return -TARGET_EFAULT;
2777 break;
2778 }
2779
2780 return ret;
2781 }
2782
2783 struct target_msgbuf {
2784 abi_long mtype;
2785 char mtext[1];
2786 };
2787
2788 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2789 unsigned int msgsz, int msgflg)
2790 {
2791 struct target_msgbuf *target_mb;
2792 struct msgbuf *host_mb;
2793 abi_long ret = 0;
2794
2795 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2796 return -TARGET_EFAULT;
2797 host_mb = malloc(msgsz+sizeof(long));
2798 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2799 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2800 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2801 free(host_mb);
2802 unlock_user_struct(target_mb, msgp, 0);
2803
2804 return ret;
2805 }
2806
2807 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2808 unsigned int msgsz, abi_long msgtyp,
2809 int msgflg)
2810 {
2811 struct target_msgbuf *target_mb;
2812 char *target_mtext;
2813 struct msgbuf *host_mb;
2814 abi_long ret = 0;
2815
2816 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2817 return -TARGET_EFAULT;
2818
2819 host_mb = malloc(msgsz+sizeof(long));
2820 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
2821
2822 if (ret > 0) {
2823 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2824 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2825 if (!target_mtext) {
2826 ret = -TARGET_EFAULT;
2827 goto end;
2828 }
2829 memcpy(target_mb->mtext, host_mb->mtext, ret);
2830 unlock_user(target_mtext, target_mtext_addr, ret);
2831 }
2832
2833 target_mb->mtype = tswapal(host_mb->mtype);
2834 free(host_mb);
2835
2836 end:
2837 if (target_mb)
2838 unlock_user_struct(target_mb, msgp, 1);
2839 return ret;
2840 }
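/* Layout sketch (illustrative, restating the conversion done above):
 * target_msgbuf mirrors the kernel msgbuf, but its mtype is an abi_long,
 * so the host buffer is allocated with room for a host long instead:
 *
 *     struct msgbuf *host_mb = malloc(msgsz + sizeof(long));
 *     host_mb->mtype = tswapal(target_mb->mtype);   <- byte-swap for the guest ABI
 *     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
 *
 * which is why do_msgrcv() locates the guest mtext at
 * msgp + sizeof(abi_ulong) rather than at a host struct offset.
 */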
2841
2842 struct target_shmid_ds
2843 {
2844 struct target_ipc_perm shm_perm;
2845 abi_ulong shm_segsz;
2846 abi_ulong shm_atime;
2847 #if TARGET_ABI_BITS == 32
2848 abi_ulong __unused1;
2849 #endif
2850 abi_ulong shm_dtime;
2851 #if TARGET_ABI_BITS == 32
2852 abi_ulong __unused2;
2853 #endif
2854 abi_ulong shm_ctime;
2855 #if TARGET_ABI_BITS == 32
2856 abi_ulong __unused3;
2857 #endif
2858 int shm_cpid;
2859 int shm_lpid;
2860 abi_ulong shm_nattch;
2861 unsigned long int __unused4;
2862 unsigned long int __unused5;
2863 };
2864
2865 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2866 abi_ulong target_addr)
2867 {
2868 struct target_shmid_ds *target_sd;
2869
2870 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2871 return -TARGET_EFAULT;
2872 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2873 return -TARGET_EFAULT;
2874 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2875 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2876 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2877 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2878 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2879 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2880 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2881 unlock_user_struct(target_sd, target_addr, 0);
2882 return 0;
2883 }
2884
2885 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2886 struct shmid_ds *host_sd)
2887 {
2888 struct target_shmid_ds *target_sd;
2889
2890 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2891 return -TARGET_EFAULT;
2892 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2893 return -TARGET_EFAULT;
2894 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2895 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2896 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2897 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2898 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2899 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2900 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2901 unlock_user_struct(target_sd, target_addr, 1);
2902 return 0;
2903 }
2904
2905 struct target_shminfo {
2906 abi_ulong shmmax;
2907 abi_ulong shmmin;
2908 abi_ulong shmmni;
2909 abi_ulong shmseg;
2910 abi_ulong shmall;
2911 };
2912
2913 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2914 struct shminfo *host_shminfo)
2915 {
2916 struct target_shminfo *target_shminfo;
2917 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2918 return -TARGET_EFAULT;
2919 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2920 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2921 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2922 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2923 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2924 unlock_user_struct(target_shminfo, target_addr, 1);
2925 return 0;
2926 }
2927
2928 struct target_shm_info {
2929 int used_ids;
2930 abi_ulong shm_tot;
2931 abi_ulong shm_rss;
2932 abi_ulong shm_swp;
2933 abi_ulong swap_attempts;
2934 abi_ulong swap_successes;
2935 };
2936
2937 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2938 struct shm_info *host_shm_info)
2939 {
2940 struct target_shm_info *target_shm_info;
2941 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2942 return -TARGET_EFAULT;
2943 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2944 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2945 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2946 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2947 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2948 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2949 unlock_user_struct(target_shm_info, target_addr, 1);
2950 return 0;
2951 }
2952
2953 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2954 {
2955 struct shmid_ds dsarg;
2956 struct shminfo shminfo;
2957 struct shm_info shm_info;
2958 abi_long ret = -TARGET_EINVAL;
2959
2960 cmd &= 0xff;
2961
2962 switch(cmd) {
2963 case IPC_STAT:
2964 case IPC_SET:
2965 case SHM_STAT:
2966 if (target_to_host_shmid_ds(&dsarg, buf))
2967 return -TARGET_EFAULT;
2968 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2969 if (host_to_target_shmid_ds(buf, &dsarg))
2970 return -TARGET_EFAULT;
2971 break;
2972 case IPC_INFO:
2973 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2974 if (host_to_target_shminfo(buf, &shminfo))
2975 return -TARGET_EFAULT;
2976 break;
2977 case SHM_INFO:
2978 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2979 if (host_to_target_shm_info(buf, &shm_info))
2980 return -TARGET_EFAULT;
2981 break;
2982 case IPC_RMID:
2983 case SHM_LOCK:
2984 case SHM_UNLOCK:
2985 ret = get_errno(shmctl(shmid, cmd, NULL));
2986 break;
2987 }
2988
2989 return ret;
2990 }
2991
2992 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2993 {
2994 abi_long raddr;
2995 void *host_raddr;
2996 struct shmid_ds shm_info;
2997 int i,ret;
2998
2999 /* find out the length of the shared memory segment */
3000 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3001 if (is_error(ret)) {
3002 /* can't get length, bail out */
3003 return ret;
3004 }
3005
3006 mmap_lock();
3007
3008 if (shmaddr)
3009 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3010 else {
3011 abi_ulong mmap_start;
3012
3013 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3014
3015 if (mmap_start == -1) {
3016 errno = ENOMEM;
3017 host_raddr = (void *)-1;
3018 } else
3019 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3020 }
3021
3022 if (host_raddr == (void *)-1) {
3023 mmap_unlock();
3024 return get_errno((long)host_raddr);
3025 }
3026 raddr=h2g((unsigned long)host_raddr);
3027
3028 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3029 PAGE_VALID | PAGE_READ |
3030 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3031
3032 for (i = 0; i < N_SHM_REGIONS; i++) {
3033 if (shm_regions[i].start == 0) {
3034 shm_regions[i].start = raddr;
3035 shm_regions[i].size = shm_info.shm_segsz;
3036 break;
3037 }
3038 }
3039
3040 mmap_unlock();
3041 return raddr;
3042
3043 }
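/* Address translation sketch (illustrative): do_shmat() works in two
 * address spaces.  The host attach address is derived with g2h() and the
 * value handed back to the guest is converted with h2g():
 *
 *     host_raddr = shmat(shmid, g2h(shmaddr), shmflg);   <- guest -> host
 *     raddr      = h2g((unsigned long)host_raddr);       <- host  -> guest
 *
 * so the guest only ever sees addresses inside its own virtual address
 * space, while shm_regions[] records the guest-side mapping for do_shmdt().
 */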
3044
3045 static inline abi_long do_shmdt(abi_ulong shmaddr)
3046 {
3047 int i;
3048
3049 for (i = 0; i < N_SHM_REGIONS; ++i) {
3050 if (shm_regions[i].start == shmaddr) {
3051 shm_regions[i].start = 0;
3052 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3053 break;
3054 }
3055 }
3056
3057 return get_errno(shmdt(g2h(shmaddr)));
3058 }
3059
3060 #ifdef TARGET_NR_ipc
3061 /* ??? This only works with linear mappings. */
3062 /* do_ipc() must return target values and target errnos. */
3063 static abi_long do_ipc(unsigned int call, int first,
3064 int second, int third,
3065 abi_long ptr, abi_long fifth)
3066 {
3067 int version;
3068 abi_long ret = 0;
3069
3070 version = call >> 16;
3071 call &= 0xffff;
3072
3073 switch (call) {
3074 case IPCOP_semop:
3075 ret = do_semop(first, ptr, second);
3076 break;
3077
3078 case IPCOP_semget:
3079 ret = get_errno(semget(first, second, third));
3080 break;
3081
3082 case IPCOP_semctl:
3083 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3084 break;
3085
3086 case IPCOP_msgget:
3087 ret = get_errno(msgget(first, second));
3088 break;
3089
3090 case IPCOP_msgsnd:
3091 ret = do_msgsnd(first, ptr, second, third);
3092 break;
3093
3094 case IPCOP_msgctl:
3095 ret = do_msgctl(first, second, ptr);
3096 break;
3097
3098 case IPCOP_msgrcv:
3099 switch (version) {
3100 case 0:
3101 {
3102 struct target_ipc_kludge {
3103 abi_long msgp;
3104 abi_long msgtyp;
3105 } *tmp;
3106
3107 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3108 ret = -TARGET_EFAULT;
3109 break;
3110 }
3111
3112 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3113
3114 unlock_user_struct(tmp, ptr, 0);
3115 break;
3116 }
3117 default:
3118 ret = do_msgrcv(first, ptr, second, fifth, third);
3119 }
3120 break;
3121
3122 case IPCOP_shmat:
3123 switch (version) {
3124 default:
3125 {
3126 abi_ulong raddr;
3127 raddr = do_shmat(first, ptr, second);
3128 if (is_error(raddr))
3129 return get_errno(raddr);
3130 if (put_user_ual(raddr, third))
3131 return -TARGET_EFAULT;
3132 break;
3133 }
3134 case 1:
3135 ret = -TARGET_EINVAL;
3136 break;
3137 }
3138 break;
3139 case IPCOP_shmdt:
3140 ret = do_shmdt(ptr);
3141 break;
3142
3143 case IPCOP_shmget:
3144 /* IPC_* flag values are the same on all Linux platforms */
3145 ret = get_errno(shmget(first, second, third));
3146 break;
3147
3148 /* IPC_* and SHM_* command values are the same on all Linux platforms */
3149 case IPCOP_shmctl:
3150 ret = do_shmctl(first, second, third);
3151 break;
3152 default:
3153 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3154 ret = -TARGET_ENOSYS;
3155 break;
3156 }
3157 return ret;
3158 }
3159 #endif
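/* Call encoding sketch (illustrative): the ipc(2) multiplexer packs an ABI
 * version number into the upper 16 bits of "call", which do_ipc() splits
 * back out before dispatching:
 *
 *     version = call >> 16;      <- selects old vs. new argument layouts
 *     call   &= 0xffff;          <- IPCOP_semop, IPCOP_shmat, ...
 *
 * The version matters for IPCOP_msgrcv (version 0 passes a small kludge
 * struct through ptr) and IPCOP_shmat (version 1 is rejected above).
 */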
3160
3161 /* kernel structure types definitions */
3162
3163 #define STRUCT(name, ...) STRUCT_ ## name,
3164 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3165 enum {
3166 #include "syscall_types.h"
3167 };
3168 #undef STRUCT
3169 #undef STRUCT_SPECIAL
3170
3171 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3172 #define STRUCT_SPECIAL(name)
3173 #include "syscall_types.h"
3174 #undef STRUCT
3175 #undef STRUCT_SPECIAL
3176
3177 typedef struct IOCTLEntry IOCTLEntry;
3178
3179 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3180 int fd, abi_long cmd, abi_long arg);
3181
3182 struct IOCTLEntry {
3183 unsigned int target_cmd;
3184 unsigned int host_cmd;
3185 const char *name;
3186 int access;
3187 do_ioctl_fn *do_ioctl;
3188 const argtype arg_type[5];
3189 };
3190
3191 #define IOC_R 0x0001
3192 #define IOC_W 0x0002
3193 #define IOC_RW (IOC_R | IOC_W)
3194
3195 #define MAX_STRUCT_SIZE 4096
3196
3197 #ifdef CONFIG_FIEMAP
3198 /* So fiemap access checks don't overflow on 32 bit systems.
3199 * This is very slightly smaller than the limit imposed by
3200 * the underlying kernel.
3201 */
3202 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3203 / sizeof(struct fiemap_extent))
3204
3205 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3206 int fd, abi_long cmd, abi_long arg)
3207 {
3208 /* The parameter for this ioctl is a struct fiemap followed
3209 * by an array of struct fiemap_extent whose size is set
3210 * in fiemap->fm_extent_count. The array is filled in by the
3211 * ioctl.
3212 */
3213 int target_size_in, target_size_out;
3214 struct fiemap *fm;
3215 const argtype *arg_type = ie->arg_type;
3216 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3217 void *argptr, *p;
3218 abi_long ret;
3219 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3220 uint32_t outbufsz;
3221 int free_fm = 0;
3222
3223 assert(arg_type[0] == TYPE_PTR);
3224 assert(ie->access == IOC_RW);
3225 arg_type++;
3226 target_size_in = thunk_type_size(arg_type, 0);
3227 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3228 if (!argptr) {
3229 return -TARGET_EFAULT;
3230 }
3231 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3232 unlock_user(argptr, arg, 0);
3233 fm = (struct fiemap *)buf_temp;
3234 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3235 return -TARGET_EINVAL;
3236 }
3237
3238 outbufsz = sizeof (*fm) +
3239 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3240
3241 if (outbufsz > MAX_STRUCT_SIZE) {
3242 /* We can't fit all the extents into the fixed size buffer.
3243 * Allocate one that is large enough and use it instead.
3244 */
3245 fm = malloc(outbufsz);
3246 if (!fm) {
3247 return -TARGET_ENOMEM;
3248 }
3249 memcpy(fm, buf_temp, sizeof(struct fiemap));
3250 free_fm = 1;
3251 }
3252 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3253 if (!is_error(ret)) {
3254 target_size_out = target_size_in;
3255 /* An extent_count of 0 means we were only counting the extents
3256 * so there are no structs to copy
3257 */
3258 if (fm->fm_extent_count != 0) {
3259 target_size_out += fm->fm_mapped_extents * extent_size;
3260 }
3261 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3262 if (!argptr) {
3263 ret = -TARGET_EFAULT;
3264 } else {
3265 /* Convert the struct fiemap */
3266 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3267 if (fm->fm_extent_count != 0) {
3268 p = argptr + target_size_in;
3269 /* ...and then all the struct fiemap_extents */
3270 for (i = 0; i < fm->fm_mapped_extents; i++) {
3271 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3272 THUNK_TARGET);
3273 p += extent_size;
3274 }
3275 }
3276 unlock_user(argptr, arg, target_size_out);
3277 }
3278 }
3279 if (free_fm) {
3280 free(fm);
3281 }
3282 return ret;
3283 }
3284 #endif
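/* Sizing sketch (illustrative, restating the arithmetic used above): the
 * FS_IOC_FIEMAP payload is a struct fiemap followed by fm_extent_count
 * extents, so the conversion buffer must hold
 *
 *     outbufsz = sizeof(struct fiemap)
 *              + fm->fm_extent_count * sizeof(struct fiemap_extent);
 *
 * Anything above MAX_STRUCT_SIZE (4096 bytes) falls back to a temporary
 * malloc'd buffer, and FIEMAP_MAX_EXTENTS keeps that multiplication from
 * overflowing a 32 bit size.
 */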
3285
3286 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3287 int fd, abi_long cmd, abi_long arg)
3288 {
3289 const argtype *arg_type = ie->arg_type;
3290 int target_size;
3291 void *argptr;
3292 int ret;
3293 struct ifconf *host_ifconf;
3294 uint32_t outbufsz;
3295 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3296 int target_ifreq_size;
3297 int nb_ifreq;
3298 int free_buf = 0;
3299 int i;
3300 int target_ifc_len;
3301 abi_long target_ifc_buf;
3302 int host_ifc_len;
3303 char *host_ifc_buf;
3304
3305 assert(arg_type[0] == TYPE_PTR);
3306 assert(ie->access == IOC_RW);
3307
3308 arg_type++;
3309 target_size = thunk_type_size(arg_type, 0);
3310
3311 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3312 if (!argptr)
3313 return -TARGET_EFAULT;
3314 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3315 unlock_user(argptr, arg, 0);
3316
3317 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3318 target_ifc_len = host_ifconf->ifc_len;
3319 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3320
3321 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3322 nb_ifreq = target_ifc_len / target_ifreq_size;
3323 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3324
3325 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3326 if (outbufsz > MAX_STRUCT_SIZE) {
3327 /* We can't fit the whole ifreq array into the fixed size buffer.
3328 * Allocate one that is large enough and use it instead.
3329 */
3330 host_ifconf = malloc(outbufsz);
3331 if (!host_ifconf) {
3332 return -TARGET_ENOMEM;
3333 }
3334 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3335 free_buf = 1;
3336 }
3337 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3338
3339 host_ifconf->ifc_len = host_ifc_len;
3340 host_ifconf->ifc_buf = host_ifc_buf;
3341
3342 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3343 if (!is_error(ret)) {
3344 /* convert host ifc_len to target ifc_len */
3345
3346 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3347 target_ifc_len = nb_ifreq * target_ifreq_size;
3348 host_ifconf->ifc_len = target_ifc_len;
3349
3350 /* restore target ifc_buf */
3351
3352 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3353
3354 /* copy struct ifconf to target user */
3355
3356 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3357 if (!argptr)
3358 return -TARGET_EFAULT;
3359 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3360 unlock_user(argptr, arg, target_size);
3361
3362 /* copy ifreq[] to target user */
3363
3364 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3365 for (i = 0; i < nb_ifreq ; i++) {
3366 thunk_convert(argptr + i * target_ifreq_size,
3367 host_ifc_buf + i * sizeof(struct ifreq),
3368 ifreq_arg_type, THUNK_TARGET);
3369 }
3370 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3371 }
3372
3373 if (free_buf) {
3374 free(host_ifconf);
3375 }
3376
3377 return ret;
3378 }
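/* Length conversion sketch (illustrative, restating the arithmetic above):
 * the guest's ifc_len counts target-sized ifreq records while the host
 * syscall works in host-sized ones, so the two are scaled through the
 * record count:
 *
 *     nb_ifreq       = target_ifc_len / target_ifreq_size;
 *     host_ifc_len   = nb_ifreq * sizeof(struct ifreq);
 *     ... ioctl(SIOCGIFCONF) ...
 *     target_ifc_len = (host_ifconf->ifc_len / sizeof(struct ifreq))
 *                      * target_ifreq_size;
 *
 * before ifc_len and the ifreq array are copied back to the guest.
 */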
3379
3380 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3381 abi_long cmd, abi_long arg)
3382 {
3383 void *argptr;
3384 struct dm_ioctl *host_dm;
3385 abi_long guest_data;
3386 uint32_t guest_data_size;
3387 int target_size;
3388 const argtype *arg_type = ie->arg_type;
3389 abi_long ret;
3390 void *big_buf = NULL;
3391 char *host_data;
3392
3393 arg_type++;
3394 target_size = thunk_type_size(arg_type, 0);
3395 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3396 if (!argptr) {
3397 ret = -TARGET_EFAULT;
3398 goto out;
3399 }
3400 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3401 unlock_user(argptr, arg, 0);
3402
3403 /* buf_temp is too small, so fetch things into a bigger buffer */
3404 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3405 memcpy(big_buf, buf_temp, target_size);
3406 buf_temp = big_buf;
3407 host_dm = big_buf;
3408
3409 guest_data = arg + host_dm->data_start;
3410 if ((guest_data - arg) < 0) {
3411 ret = -EINVAL;
3412 goto out;
3413 }
3414 guest_data_size = host_dm->data_size - host_dm->data_start;
3415 host_data = (char*)host_dm + host_dm->data_start;
3416
3417 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3418 switch (ie->host_cmd) {
3419 case DM_REMOVE_ALL:
3420 case DM_LIST_DEVICES:
3421 case DM_DEV_CREATE:
3422 case DM_DEV_REMOVE:
3423 case DM_DEV_SUSPEND:
3424 case DM_DEV_STATUS:
3425 case DM_DEV_WAIT:
3426 case DM_TABLE_STATUS:
3427 case DM_TABLE_CLEAR:
3428 case DM_TABLE_DEPS:
3429 case DM_LIST_VERSIONS:
3430 /* no input data */
3431 break;
3432 case DM_DEV_RENAME:
3433 case DM_DEV_SET_GEOMETRY:
3434 /* data contains only strings */
3435 memcpy(host_data, argptr, guest_data_size);
3436 break;
3437 case DM_TARGET_MSG:
3438 memcpy(host_data, argptr, guest_data_size);
3439 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3440 break;
3441 case DM_TABLE_LOAD:
3442 {
3443 void *gspec = argptr;
3444 void *cur_data = host_data;
3445 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3446 int spec_size = thunk_type_size(arg_type, 0);
3447 int i;
3448
3449 for (i = 0; i < host_dm->target_count; i++) {
3450 struct dm_target_spec *spec = cur_data;
3451 uint32_t next;
3452 int slen;
3453
3454 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3455 slen = strlen((char*)gspec + spec_size) + 1;
3456 next = spec->next;
3457 spec->next = sizeof(*spec) + slen;
3458 strcpy((char*)&spec[1], gspec + spec_size);
3459 gspec += next;
3460 cur_data += spec->next;
3461 }
3462 break;
3463 }
3464 default:
3465 ret = -TARGET_EINVAL;
3466 goto out;
3467 }
3468 unlock_user(argptr, guest_data, 0);
3469
3470 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3471 if (!is_error(ret)) {
3472 guest_data = arg + host_dm->data_start;
3473 guest_data_size = host_dm->data_size - host_dm->data_start;
3474 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3475 switch (ie->host_cmd) {
3476 case DM_REMOVE_ALL:
3477 case DM_DEV_CREATE:
3478 case DM_DEV_REMOVE:
3479 case DM_DEV_RENAME:
3480 case DM_DEV_SUSPEND:
3481 case DM_DEV_STATUS:
3482 case DM_TABLE_LOAD:
3483 case DM_TABLE_CLEAR:
3484 case DM_TARGET_MSG:
3485 case DM_DEV_SET_GEOMETRY:
3486 /* no return data */
3487 break;
3488 case DM_LIST_DEVICES:
3489 {
3490 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3491 uint32_t remaining_data = guest_data_size;
3492 void *cur_data = argptr;
3493 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3494 int nl_size = 12; /* can't use thunk_type_size() due to alignment */
3495
3496 while (1) {
3497 uint32_t next = nl->next;
3498 if (next) {
3499 nl->next = nl_size + (strlen(nl->name) + 1);
3500 }
3501 if (remaining_data < nl->next) {
3502 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3503 break;
3504 }
3505 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3506 strcpy(cur_data + nl_size, nl->name);
3507 cur_data += nl->next;
3508 remaining_data -= nl->next;
3509 if (!next) {
3510 break;
3511 }
3512 nl = (void*)nl + next;
3513 }
3514 break;
3515 }
3516 case DM_DEV_WAIT:
3517 case DM_TABLE_STATUS:
3518 {
3519 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3520 void *cur_data = argptr;
3521 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3522 int spec_size = thunk_type_size(arg_type, 0);
3523 int i;
3524
3525 for (i = 0; i < host_dm->target_count; i++) {
3526 uint32_t next = spec->next;
3527 int slen = strlen((char*)&spec[1]) + 1;
3528 spec->next = (cur_data - argptr) + spec_size + slen;
3529 if (guest_data_size < spec->next) {
3530 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3531 break;
3532 }
3533 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3534 strcpy(cur_data + spec_size, (char*)&spec[1]);
3535 cur_data = argptr + spec->next;
3536 spec = (void*)host_dm + host_dm->data_start + next;
3537 }
3538 break;
3539 }
3540 case DM_TABLE_DEPS:
3541 {
3542 void *hdata = (void*)host_dm + host_dm->data_start;
3543 int count = *(uint32_t*)hdata;
3544 uint64_t *hdev = hdata + 8;
3545 uint64_t *gdev = argptr + 8;
3546 int i;
3547
3548 *(uint32_t*)argptr = tswap32(count);
3549 for (i = 0; i < count; i++) {
3550 *gdev = tswap64(*hdev);
3551 gdev++;
3552 hdev++;
3553 }
3554 break;
3555 }
3556 case DM_LIST_VERSIONS:
3557 {
3558 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3559 uint32_t remaining_data = guest_data_size;
3560 void *cur_data = argptr;
3561 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3562 int vers_size = thunk_type_size(arg_type, 0);
3563
3564 while (1) {
3565 uint32_t next = vers->next;
3566 if (next) {
3567 vers->next = vers_size + (strlen(vers->name) + 1);
3568 }
3569 if (remaining_data < vers->next) {
3570 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3571 break;
3572 }
3573 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3574 strcpy(cur_data + vers_size, vers->name);
3575 cur_data += vers->next;
3576 remaining_data -= vers->next;
3577 if (!next) {
3578 break;
3579 }
3580 vers = (void*)vers + next;
3581 }
3582 break;
3583 }
3584 default:
3585 ret = -TARGET_EINVAL;
3586 goto out;
3587 }
3588 unlock_user(argptr, guest_data, guest_data_size);
3589
3590 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3591 if (!argptr) {
3592 ret = -TARGET_EFAULT;
3593 goto out;
3594 }
3595 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3596 unlock_user(argptr, arg, target_size);
3597 }
3598 out:
3599 if (big_buf) {
3600 free(big_buf);
3601 }
3602 return ret;
3603 }
3604
3605 static IOCTLEntry ioctl_entries[] = {
3606 #define IOCTL(cmd, access, ...) \
3607 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3608 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3609 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3610 #include "ioctls.h"
3611 { 0, 0, },
3612 };
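/* Expansion sketch (illustrative; the ioctl named here is only an example,
 * the real list lives in "ioctls.h"): an entry written as
 *
 *     IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG))
 *
 * expands, via the macro above, to
 *
 *     { TARGET_BLKGETSIZE, BLKGETSIZE, "BLKGETSIZE", IOC_R, 0,
 *       { MK_PTR(TYPE_ULONG) } },
 *
 * i.e. target command, host command, printable name, access mode, no
 * special handler, and the thunk argument type used by do_ioctl() below.
 */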
3613
3614 /* ??? Implement proper locking for ioctls. */
3615 /* do_ioctl() Must return target values and target errnos. */
3616 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3617 {
3618 const IOCTLEntry *ie;
3619 const argtype *arg_type;
3620 abi_long ret;
3621 uint8_t buf_temp[MAX_STRUCT_SIZE];
3622 int target_size;
3623 void *argptr;
3624
3625 ie = ioctl_entries;
3626 for(;;) {
3627 if (ie->target_cmd == 0) {
3628 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3629 return -TARGET_ENOSYS;
3630 }
3631 if (ie->target_cmd == cmd)
3632 break;
3633 ie++;
3634 }
3635 arg_type = ie->arg_type;
3636 #if defined(DEBUG)
3637 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3638 #endif
3639 if (ie->do_ioctl) {
3640 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3641 }
3642
3643 switch(arg_type[0]) {
3644 case TYPE_NULL:
3645 /* no argument */
3646 ret = get_errno(ioctl(fd, ie->host_cmd));
3647 break;
3648 case TYPE_PTRVOID:
3649 case TYPE_INT:
3650 /* int argument */
3651 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3652 break;
3653 case TYPE_PTR:
3654 arg_type++;
3655 target_size = thunk_type_size(arg_type, 0);
3656 switch(ie->access) {
3657 case IOC_R:
3658 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3659 if (!is_error(ret)) {
3660 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3661 if (!argptr)
3662 return -TARGET_EFAULT;
3663 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3664 unlock_user(argptr, arg, target_size);
3665 }
3666 break;
3667 case IOC_W:
3668 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3669 if (!argptr)
3670 return -TARGET_EFAULT;
3671 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3672 unlock_user(argptr, arg, 0);
3673 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3674 break;
3675 default:
3676 case IOC_RW:
3677 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3678 if (!argptr)
3679 return -TARGET_EFAULT;
3680 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3681 unlock_user(argptr, arg, 0);
3682 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3683 if (!is_error(ret)) {
3684 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3685 if (!argptr)
3686 return -TARGET_EFAULT;
3687 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3688 unlock_user(argptr, arg, target_size);
3689 }
3690 break;
3691 }
3692 break;
3693 default:
3694 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3695 (long)cmd, arg_type[0]);
3696 ret = -TARGET_ENOSYS;
3697 break;
3698 }
3699 return ret;
3700 }
3701
3702 static const bitmask_transtbl iflag_tbl[] = {
3703 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3704 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3705 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3706 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3707 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3708 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3709 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3710 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3711 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3712 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3713 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3714 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3715 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3716 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3717 { 0, 0, 0, 0 }
3718 };
3719
3720 static const bitmask_transtbl oflag_tbl[] = {
3721 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3722 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3723 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3724 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3725 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3726 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3727 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3728 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3729 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3730 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3731 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3732 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3733 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3734 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3735 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3736 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3737 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3738 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3739 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3740 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3741 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3742 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3743 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3744 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3745 { 0, 0, 0, 0 }
3746 };
3747
3748 static const bitmask_transtbl cflag_tbl[] = {
3749 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3750 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3751 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3752 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3753 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3754 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3755 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3756 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3757 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3758 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3759 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3760 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3761 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3762 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3763 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3764 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3765 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3766 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3767 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3768 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3769 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3770 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3771 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3772 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3773 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3774 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3775 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3776 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3777 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3778 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3779 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3780 { 0, 0, 0, 0 }
3781 };
3782
3783 static const bitmask_transtbl lflag_tbl[] = {
3784 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3785 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3786 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3787 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3788 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3789 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3790 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3791 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3792 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3793 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3794 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3795 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3796 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3797 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3798 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3799 { 0, 0, 0, 0 }
3800 };
3801
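/* Convert a guest struct termios to the host layout: the iflag/oflag/
   cflag/lflag bitmasks are translated through the tables above and the
   control characters in c_cc[] are copied entry by entry. */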
3802 static void target_to_host_termios (void *dst, const void *src)
3803 {
3804 struct host_termios *host = dst;
3805 const struct target_termios *target = src;
3806
3807 host->c_iflag =
3808 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3809 host->c_oflag =
3810 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3811 host->c_cflag =
3812 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3813 host->c_lflag =
3814 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3815 host->c_line = target->c_line;
3816
3817 memset(host->c_cc, 0, sizeof(host->c_cc));
3818 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3819 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3820 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3821 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3822 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3823 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3824 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3825 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3826 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3827 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3828 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3829 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3830 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3831 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3832 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3833 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3834 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3835 }
3836
3837 static void host_to_target_termios (void *dst, const void *src)
3838 {
3839 struct target_termios *target = dst;
3840 const struct host_termios *host = src;
3841
3842 target->c_iflag =
3843 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3844 target->c_oflag =
3845 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3846 target->c_cflag =
3847 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3848 target->c_lflag =
3849 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3850 target->c_line = host->c_line;
3851
3852 memset(target->c_cc, 0, sizeof(target->c_cc));
3853 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3854 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3855 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3856 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3857 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3858 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3859 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3860 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3861 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3862 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3863 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3864 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3865 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3866 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3867 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3868 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3869 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3870 }
3871
3872 static const StructEntry struct_termios_def = {
3873 .convert = { host_to_target_termios, target_to_host_termios },
3874 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3875 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3876 };
3877
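/* Each bitmask_transtbl row is { target_mask, target_bits, host_mask,
   host_bits }: when the flags on one side, masked with that side's mask,
   match that side's bits, the other side's bits are set in the result. */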
3878 static bitmask_transtbl mmap_flags_tbl[] = {
3879 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3880 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3881 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3882 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3883 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3884 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3885 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3886 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3887 { 0, 0, 0, 0 }
3888 };
3889
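/* i386-specific segment emulation: modify_ldt(2), the 32-bit ABI
   set_thread_area(2)/get_thread_area(2) and the x86-64 arch_prctl(2). */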
3890 #if defined(TARGET_I386)
3891
3892 /* NOTE: there is really only one LDT for all the threads */
3893 static uint8_t *ldt_table;
3894
3895 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3896 {
3897 int size;
3898 void *p;
3899
3900 if (!ldt_table)
3901 return 0;
3902 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3903 if (size > bytecount)
3904 size = bytecount;
3905 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3906 if (!p)
3907 return -TARGET_EFAULT;
3908 /* ??? Should this be byteswapped? */
3909 memcpy(p, ldt_table, size);
3910 unlock_user(p, ptr, size);
3911 return size;
3912 }
3913
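/* Emulate modify_ldt(2) writes: validate the descriptor supplied by the
   guest, allocate the guest LDT on first use and pack the entry into the
   two 32-bit descriptor words, mirroring the Linux kernel's layout. */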
3914 /* XXX: add locking support */
3915 static abi_long write_ldt(CPUX86State *env,
3916 abi_ulong ptr, unsigned long bytecount, int oldmode)
3917 {
3918 struct target_modify_ldt_ldt_s ldt_info;
3919 struct target_modify_ldt_ldt_s *target_ldt_info;
3920 int seg_32bit, contents, read_exec_only, limit_in_pages;
3921 int seg_not_present, useable, lm;
3922 uint32_t *lp, entry_1, entry_2;
3923
3924 if (bytecount != sizeof(ldt_info))
3925 return -TARGET_EINVAL;
3926 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3927 return -TARGET_EFAULT;
3928 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3929 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3930 ldt_info.limit = tswap32(target_ldt_info->limit);
3931 ldt_info.flags = tswap32(target_ldt_info->flags);
3932 unlock_user_struct(target_ldt_info, ptr, 0);
3933
3934 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3935 return -TARGET_EINVAL;
3936 seg_32bit = ldt_info.flags & 1;
3937 contents = (ldt_info.flags >> 1) & 3;
3938 read_exec_only = (ldt_info.flags >> 3) & 1;
3939 limit_in_pages = (ldt_info.flags >> 4) & 1;
3940 seg_not_present = (ldt_info.flags >> 5) & 1;
3941 useable = (ldt_info.flags >> 6) & 1;
3942 #ifdef TARGET_ABI32
3943 lm = 0;
3944 #else
3945 lm = (ldt_info.flags >> 7) & 1;
3946 #endif
3947 if (contents == 3) {
3948 if (oldmode)
3949 return -TARGET_EINVAL;
3950 if (seg_not_present == 0)
3951 return -TARGET_EINVAL;
3952 }
3953 /* allocate the LDT */
3954 if (!ldt_table) {
3955 env->ldt.base = target_mmap(0,
3956 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3957 PROT_READ|PROT_WRITE,
3958 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3959 if (env->ldt.base == -1)
3960 return -TARGET_ENOMEM;
3961 memset(g2h(env->ldt.base), 0,
3962 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3963 env->ldt.limit = 0xffff;
3964 ldt_table = g2h(env->ldt.base);
3965 }
3966
3967 /* NOTE: same code as Linux kernel */
3968 /* Allow LDTs to be cleared by the user. */
3969 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3970 if (oldmode ||
3971 (contents == 0 &&
3972 read_exec_only == 1 &&
3973 seg_32bit == 0 &&
3974 limit_in_pages == 0 &&
3975 seg_not_present == 1 &&
3976 useable == 0 )) {
3977 entry_1 = 0;
3978 entry_2 = 0;
3979 goto install;
3980 }
3981 }
3982
3983 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3984 (ldt_info.limit & 0x0ffff);
3985 entry_2 = (ldt_info.base_addr & 0xff000000) |
3986 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3987 (ldt_info.limit & 0xf0000) |
3988 ((read_exec_only ^ 1) << 9) |
3989 (contents << 10) |
3990 ((seg_not_present ^ 1) << 15) |
3991 (seg_32bit << 22) |
3992 (limit_in_pages << 23) |
3993 (lm << 21) |
3994 0x7000;
3995 if (!oldmode)
3996 entry_2 |= (useable << 20);
3997
3998 /* Install the new entry ... */
3999 install:
4000 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4001 lp[0] = tswap32(entry_1);
4002 lp[1] = tswap32(entry_2);
4003 return 0;
4004 }
4005
4006 /* specific and weird i386 syscalls */
4007 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4008 unsigned long bytecount)
4009 {
4010 abi_long ret;
4011
4012 switch (func) {
4013 case 0:
4014 ret = read_ldt(ptr, bytecount);
4015 break;
4016 case 1:
4017 ret = write_ldt(env, ptr, bytecount, 1);
4018 break;
4019 case 0x11:
4020 ret = write_ldt(env, ptr, bytecount, 0);
4021 break;
4022 default:
4023 ret = -TARGET_ENOSYS;
4024 break;
4025 }
4026 return ret;
4027 }
4028
4029 #if defined(TARGET_I386) && defined(TARGET_ABI32)
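/* set_thread_area(2): install a TLS descriptor into the guest GDT; when
   the guest passes entry_number == -1, pick a free TLS slot and report
   the chosen index back to the guest. */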
4030 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4031 {
4032 uint64_t *gdt_table = g2h(env->gdt.base);
4033 struct target_modify_ldt_ldt_s ldt_info;
4034 struct target_modify_ldt_ldt_s *target_ldt_info;
4035 int seg_32bit, contents, read_exec_only, limit_in_pages;
4036 int seg_not_present, useable, lm;
4037 uint32_t *lp, entry_1, entry_2;
4038 int i;
4039
4040 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4041 if (!target_ldt_info)
4042 return -TARGET_EFAULT;
4043 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4044 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4045 ldt_info.limit = tswap32(target_ldt_info->limit);
4046 ldt_info.flags = tswap32(target_ldt_info->flags);
4047 if (ldt_info.entry_number == -1) {
4048 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4049 if (gdt_table[i] == 0) {
4050 ldt_info.entry_number = i;
4051 target_ldt_info->entry_number = tswap32(i);
4052 break;
4053 }
4054 }
4055 }
4056 unlock_user_struct(target_ldt_info, ptr, 1);
4057
4058 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4059 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4060 return -TARGET_EINVAL;
4061 seg_32bit = ldt_info.flags & 1;
4062 contents = (ldt_info.flags >> 1) & 3;
4063 read_exec_only = (ldt_info.flags >> 3) & 1;
4064 limit_in_pages = (ldt_info.flags >> 4) & 1;
4065 seg_not_present = (ldt_info.flags >> 5) & 1;
4066 useable = (ldt_info.flags >> 6) & 1;
4067 #ifdef TARGET_ABI32
4068 lm = 0;
4069 #else
4070 lm = (ldt_info.flags >> 7) & 1;
4071 #endif
4072
4073 if (contents == 3) {
4074 if (seg_not_present == 0)
4075 return -TARGET_EINVAL;
4076 }
4077
4078 /* NOTE: same code as Linux kernel */
4079 /* Allow LDTs to be cleared by the user. */
4080 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4081 if ((contents == 0 &&
4082 read_exec_only == 1 &&
4083 seg_32bit == 0 &&
4084 limit_in_pages == 0 &&
4085 seg_not_present == 1 &&
4086 useable == 0 )) {
4087 entry_1 = 0;
4088 entry_2 = 0;
4089 goto install;
4090 }
4091 }
4092
4093 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4094 (ldt_info.limit & 0x0ffff);
4095 entry_2 = (ldt_info.base_addr & 0xff000000) |
4096 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4097 (ldt_info.limit & 0xf0000) |
4098 ((read_exec_only ^ 1) << 9) |
4099 (contents << 10) |
4100 ((seg_not_present ^ 1) << 15) |
4101 (seg_32bit << 22) |
4102 (limit_in_pages << 23) |
4103 (useable << 20) |
4104 (lm << 21) |
4105 0x7000;
4106
4107 /* Install the new entry ... */
4108 install:
4109 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4110 lp[0] = tswap32(entry_1);
4111 lp[1] = tswap32(entry_2);
4112 return 0;
4113 }
4114
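/* get_thread_area(2): decode the descriptor words of the requested GDT
   slot back into the base/limit/flags form expected by the guest. */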
4115 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4116 {
4117 struct target_modify_ldt_ldt_s *target_ldt_info;
4118 uint64_t *gdt_table = g2h(env->gdt.base);
4119 uint32_t base_addr, limit, flags;
4120 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4121 int seg_not_present, useable, lm;
4122 uint32_t *lp, entry_1, entry_2;
4123
4124 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4125 if (!target_ldt_info)
4126 return -TARGET_EFAULT;
4127 idx = tswap32(target_ldt_info->entry_number);
4128 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4129 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4130 unlock_user_struct(target_ldt_info, ptr, 1);
4131 return -TARGET_EINVAL;
4132 }
4133 lp = (uint32_t *)(gdt_table + idx);
4134 entry_1 = tswap32(lp[0]);
4135 entry_2 = tswap32(lp[1]);
4136
4137 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4138 contents = (entry_2 >> 10) & 3;
4139 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4140 seg_32bit = (entry_2 >> 22) & 1;
4141 limit_in_pages = (entry_2 >> 23) & 1;
4142 useable = (entry_2 >> 20) & 1;
4143 #ifdef TARGET_ABI32
4144 lm = 0;
4145 #else
4146 lm = (entry_2 >> 21) & 1;
4147 #endif
4148 flags = (seg_32bit << 0) | (contents << 1) |
4149 (read_exec_only << 3) | (limit_in_pages << 4) |
4150 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4151 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4152 base_addr = (entry_1 >> 16) |
4153 (entry_2 & 0xff000000) |
4154 ((entry_2 & 0xff) << 16);
4155 target_ldt_info->base_addr = tswapal(base_addr);
4156 target_ldt_info->limit = tswap32(limit);
4157 target_ldt_info->flags = tswap32(flags);
4158 unlock_user_struct(target_ldt_info, ptr, 1);
4159 return 0;
4160 }
4161 #endif /* TARGET_I386 && TARGET_ABI32 */
4162
4163 #ifndef TARGET_ABI32
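/* x86-64 arch_prctl(2): get or set the guest's FS/GS segment base. */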
4164 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4165 {
4166 abi_long ret = 0;
4167 abi_ulong val;
4168 int idx;
4169
4170 switch(code) {
4171 case TARGET_ARCH_SET_GS:
4172 case TARGET_ARCH_SET_FS:
4173 if (code == TARGET_ARCH_SET_GS)
4174 idx = R_GS;
4175 else
4176 idx = R_FS;
4177 cpu_x86_load_seg(env, idx, 0);
4178 env->segs[idx].base = addr;
4179 break;
4180 case TARGET_ARCH_GET_GS:
4181 case TARGET_ARCH_GET_FS:
4182 if (code == TARGET_ARCH_GET_GS)
4183 idx = R_GS;
4184 else
4185 idx = R_FS;
4186 val = env->segs[idx].base;
4187 if (put_user(val, addr, abi_ulong))
4188 ret = -TARGET_EFAULT;
4189 break;
4190 default:
4191 ret = -TARGET_EINVAL;
4192 break;
4193 }
4194 return ret;
4195 }
4196 #endif
4197
4198 #endif /* defined(TARGET_I386) */
4199
4200 #define NEW_STACK_SIZE 0x40000
4201
4202 #if defined(CONFIG_USE_NPTL)
4203
4204 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4205 typedef struct {
4206 CPUArchState *env;
4207 pthread_mutex_t mutex;
4208 pthread_cond_t cond;
4209 pthread_t thread;
4210 uint32_t tid;
4211 abi_ulong child_tidptr;
4212 abi_ulong parent_tidptr;
4213 sigset_t sigmask;
4214 } new_thread_info;
4215
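/* Entry point of host threads created by do_fork(): record the new
   thread's tid, restore the signal mask, wake the waiting parent, wait
   for it to finish the TLS setup (by taking clone_lock) and then enter
   the guest CPU loop. */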
4216 static void *clone_func(void *arg)
4217 {
4218 new_thread_info *info = arg;
4219 CPUArchState *env;
4220 TaskState *ts;
4221
4222 env = info->env;
4223 thread_env = env;
4224 ts = (TaskState *)thread_env->opaque;
4225 info->tid = gettid();
4226 env->host_tid = info->tid;
4227 task_settid(ts);
4228 if (info->child_tidptr)
4229 put_user_u32(info->tid, info->child_tidptr);
4230 if (info->parent_tidptr)
4231 put_user_u32(info->tid, info->parent_tidptr);
4232 /* Enable signals. */
4233 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4234 /* Signal to the parent that we're ready. */
4235 pthread_mutex_lock(&info->mutex);
4236 pthread_cond_broadcast(&info->cond);
4237 pthread_mutex_unlock(&info->mutex);
4238 /* Wait until the parent has finished initializing the TLS state. */
4239 pthread_mutex_lock(&clone_lock);
4240 pthread_mutex_unlock(&clone_lock);
4241 cpu_loop(env);
4242 /* never exits */
4243 return NULL;
4244 }
4245 #else
4246
4247 static int clone_func(void *arg)
4248 {
4249 CPUArchState *env = arg;
4250 cpu_loop(env);
4251 /* never exits */
4252 return 0;
4253 }
4254 #endif
4255
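/* Implement clone(2)/fork(2): with CLONE_VM a new guest CPU state is set
   up and run either on a host thread (NPTL) or via host clone(); without
   CLONE_VM the whole emulator is simply forked.  vfork() is emulated with
   a plain fork(). */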
4256 /* do_fork() must return host values and target errnos (unlike most
4257 do_*() functions). */
4258 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4259 abi_ulong parent_tidptr, target_ulong newtls,
4260 abi_ulong child_tidptr)
4261 {
4262 int ret;
4263 TaskState *ts;
4264 CPUArchState *new_env;
4265 #if defined(CONFIG_USE_NPTL)
4266 unsigned int nptl_flags;
4267 sigset_t sigmask;
4268 #else
4269 uint8_t *new_stack;
4270 #endif
4271
4272 /* Emulate vfork() with fork() */
4273 if (flags & CLONE_VFORK)
4274 flags &= ~(CLONE_VFORK | CLONE_VM);
4275
4276 if (flags & CLONE_VM) {
4277 TaskState *parent_ts = (TaskState *)env->opaque;
4278 #if defined(CONFIG_USE_NPTL)
4279 new_thread_info info;
4280 pthread_attr_t attr;
4281 #endif
4282 ts = g_malloc0(sizeof(TaskState));
4283 init_task_state(ts);
4284 /* we create a new CPU instance. */
4285 new_env = cpu_copy(env);
4286 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4287 cpu_reset(ENV_GET_CPU(new_env));
4288 #endif
4289 /* Init regs that differ from the parent. */
4290 cpu_clone_regs(new_env, newsp);
4291 new_env->opaque = ts;
4292 ts->bprm = parent_ts->bprm;
4293 ts->info = parent_ts->info;
4294 #if defined(CONFIG_USE_NPTL)
4295 nptl_flags = flags;
4296 flags &= ~CLONE_NPTL_FLAGS2;
4297
4298 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4299 ts->child_tidptr = child_tidptr;
4300 }
4301
4302 if (nptl_flags & CLONE_SETTLS)
4303 cpu_set_tls (new_env, newtls);
4304
4305 /* Grab a mutex so that thread setup appears atomic. */
4306 pthread_mutex_lock(&clone_lock);
4307
4308 memset(&info, 0, sizeof(info));
4309 pthread_mutex_init(&info.mutex, NULL);
4310 pthread_mutex_lock(&info.mutex);
4311 pthread_cond_init(&info.cond, NULL);
4312 info.env = new_env;
4313 if (nptl_flags & CLONE_CHILD_SETTID)
4314 info.child_tidptr = child_tidptr;
4315 if (nptl_flags & CLONE_PARENT_SETTID)
4316 info.parent_tidptr = parent_tidptr;
4317
4318 ret = pthread_attr_init(&attr);
4319 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4320 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4321 /* It is not safe to deliver signals until the child has finished
4322 initializing, so temporarily block all signals. */
4323 sigfillset(&sigmask);
4324 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4325
4326 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4327 /* TODO: Free new CPU state if thread creation failed. */
4328
4329 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4330 pthread_attr_destroy(&attr);
4331 if (ret == 0) {
4332 /* Wait for the child to initialize. */
4333 pthread_cond_wait(&info.cond, &info.mutex);
4334 ret = info.tid;
4335 if (flags & CLONE_PARENT_SETTID)
4336 put_user_u32(ret, parent_tidptr);
4337 } else {
4338 ret = -1;
4339 }
4340 pthread_mutex_unlock(&info.mutex);
4341 pthread_cond_destroy(&info.cond);
4342 pthread_mutex_destroy(&info.mutex);
4343 pthread_mutex_unlock(&clone_lock);
4344 #else
4345 if (flags & CLONE_NPTL_FLAGS2)
4346 return -EINVAL;
4347 /* This is probably going to die very quickly, but do it anyway. */
4348 new_stack = g_malloc0 (NEW_STACK_SIZE);
4349 #ifdef __ia64__
4350 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4351 #else
4352 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4353 #endif
4354 #endif
4355 } else {
4356 /* if no CLONE_VM, we consider it a fork */
4357 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4358 return -EINVAL;
4359 fork_start();
4360 ret = fork();
4361 if (ret == 0) {
4362 /* Child Process. */
4363 cpu_clone_regs(env, newsp);
4364 fork_end(1);
4365 #if defined(CONFIG_USE_NPTL)
4366 /* There is a race condition here. The parent process could
4367 theoretically read the TID in the child process before the child
4368 tid is set. This would require using either ptrace
4369 (not implemented) or having *_tidptr point at a shared memory
4370 mapping. We can't repeat the spinlock hack used above because
4371 the child process gets its own copy of the lock. */
4372 if (flags & CLONE_CHILD_SETTID)
4373 put_user_u32(gettid(), child_tidptr);
4374 if (flags & CLONE_PARENT_SETTID)
4375 put_user_u32(gettid(), parent_tidptr);
4376 ts = (TaskState *)env->opaque;
4377 if (flags & CLONE_SETTLS)
4378 cpu_set_tls (env, newtls);
4379 if (flags & CLONE_CHILD_CLEARTID)
4380 ts->child_tidptr = child_tidptr;
4381 #endif
4382 } else {
4383 fork_end(0);
4384 }
4385 }
4386 return ret;
4387 }
4388
4389 /* warning: doesn't handle Linux-specific flags... */
4390 static int target_to_host_fcntl_cmd(int cmd)
4391 {
4392 switch(cmd) {
4393 case TARGET_F_DUPFD:
4394 case TARGET_F_GETFD:
4395 case TARGET_F_SETFD:
4396 case TARGET_F_GETFL:
4397 case TARGET_F_SETFL:
4398 return cmd;
4399 case TARGET_F_GETLK:
4400 return F_GETLK;
4401 case TARGET_F_SETLK:
4402 return F_SETLK;
4403 case TARGET_F_SETLKW:
4404 return F_SETLKW;
4405 case TARGET_F_GETOWN:
4406 return F_GETOWN;
4407 case TARGET_F_SETOWN:
4408 return F_SETOWN;
4409 case TARGET_F_GETSIG:
4410 return F_GETSIG;
4411 case TARGET_F_SETSIG:
4412 return F_SETSIG;
4413 #if TARGET_ABI_BITS == 32
4414 case TARGET_F_GETLK64:
4415 return F_GETLK64;
4416 case TARGET_F_SETLK64:
4417 return F_SETLK64;
4418 case TARGET_F_SETLKW64:
4419 return F_SETLKW64;
4420 #endif
4421 case TARGET_F_SETLEASE:
4422 return F_SETLEASE;
4423 case TARGET_F_GETLEASE:
4424 return F_GETLEASE;
4425 #ifdef F_DUPFD_CLOEXEC
4426 case TARGET_F_DUPFD_CLOEXEC:
4427 return F_DUPFD_CLOEXEC;
4428 #endif
4429 case TARGET_F_NOTIFY:
4430 return F_NOTIFY;
4431 default:
4432 return -TARGET_EINVAL;
4433 }
4434 return -TARGET_EINVAL;
4435 }
4436
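/* fcntl(2) emulation: translate the command plus any struct flock,
   struct flock64 or file status flag argument between the target and
   host representations. */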
4437 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4438 {
4439 struct flock fl;
4440 struct target_flock *target_fl;
4441 struct flock64 fl64;
4442 struct target_flock64 *target_fl64;
4443 abi_long ret;
4444 int host_cmd = target_to_host_fcntl_cmd(cmd);
4445
4446 if (host_cmd == -TARGET_EINVAL)
4447 return host_cmd;
4448
4449 switch(cmd) {
4450 case TARGET_F_GETLK:
4451 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4452 return -TARGET_EFAULT;
4453 fl.l_type = tswap16(target_fl->l_type);
4454 fl.l_whence = tswap16(target_fl->l_whence);
4455 fl.l_start = tswapal(target_fl->l_start);
4456 fl.l_len = tswapal(target_fl->l_len);
4457 fl.l_pid = tswap32(target_fl->l_pid);
4458 unlock_user_struct(target_fl, arg, 0);
4459 ret = get_errno(fcntl(fd, host_cmd, &fl));
4460 if (ret == 0) {
4461 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4462 return -TARGET_EFAULT;
4463 target_fl->l_type = tswap16(fl.l_type);
4464 target_fl->l_whence = tswap16(fl.l_whence);
4465 target_fl->l_start = tswapal(fl.l_start);
4466 target_fl->l_len = tswapal(fl.l_len);
4467 target_fl->l_pid = tswap32(fl.l_pid);
4468 unlock_user_struct(target_fl, arg, 1);
4469 }
4470 break;
4471
4472 case TARGET_F_SETLK:
4473 case TARGET_F_SETLKW:
4474 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4475 return -TARGET_EFAULT;
4476 fl.l_type = tswap16(target_fl->l_type);
4477 fl.l_whence = tswap16(target_fl->l_whence);
4478 fl.l_start = tswapal(target_fl->l_start);
4479 fl.l_len = tswapal(target_fl->l_len);
4480 fl.l_pid = tswap32(target_fl->l_pid);
4481 unlock_user_struct(target_fl, arg, 0);
4482 ret = get_errno(fcntl(fd, host_cmd, &fl));
4483 break;
4484
4485 case TARGET_F_GETLK64:
4486 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4487 return -TARGET_EFAULT;
4488 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4489 fl64.l_whence = tswap16(target_fl64->l_whence);
4490 fl64.l_start = tswap64(target_fl64->l_start);
4491 fl64.l_len = tswap64(target_fl64->l_len);
4492 fl64.l_pid = tswap32(target_fl64->l_pid);
4493 unlock_user_struct(target_fl64, arg, 0);
4494 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4495 if (ret == 0) {
4496 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4497 return -TARGET_EFAULT;
4498 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4499 target_fl64->l_whence = tswap16(fl64.l_whence);
4500 target_fl64->l_start = tswap64(fl64.l_start);
4501 target_fl64->l_len = tswap64(fl64.l_len);
4502 target_fl64->l_pid = tswap32(fl64.l_pid);
4503 unlock_user_struct(target_fl64, arg, 1);
4504 }
4505 break;
4506 case TARGET_F_SETLK64:
4507 case TARGET_F_SETLKW64:
4508 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4509 return -TARGET_EFAULT;
4510 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4511 fl64.l_whence = tswap16(target_fl64->l_whence);
4512 fl64.l_start = tswap64(target_fl64->l_start);
4513 fl64.l_len = tswap64(target_fl64->l_len);
4514 fl64.l_pid = tswap32(target_fl64->l_pid);
4515 unlock_user_struct(target_fl64, arg, 0);
4516 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4517 break;
4518
4519 case TARGET_F_GETFL:
4520 ret = get_errno(fcntl(fd, host_cmd, arg));
4521 if (ret >= 0) {
4522 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4523 }
4524 break;
4525
4526 case TARGET_F_SETFL:
4527 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4528 break;
4529
4530 case TARGET_F_SETOWN:
4531 case TARGET_F_GETOWN:
4532 case TARGET_F_SETSIG:
4533 case TARGET_F_GETSIG:
4534 case TARGET_F_SETLEASE:
4535 case TARGET_F_GETLEASE:
4536 ret = get_errno(fcntl(fd, host_cmd, arg));
4537 break;
4538
4539 default:
4540 ret = get_errno(fcntl(fd, cmd, arg));
4541 break;
4542 }
4543 return ret;
4544 }
4545
4546 #ifdef USE_UID16
4547
4548 static inline int high2lowuid(int uid)
4549 {
4550 if (uid > 65535)
4551 return 65534;
4552 else
4553 return uid;
4554 }
4555
4556 static inline int high2lowgid(int gid)
4557 {
4558 if (gid > 65535)
4559 return 65534;
4560 else
4561 return gid;
4562 }
4563
4564 static inline int low2highuid(int uid)
4565 {
4566 if ((int16_t)uid == -1)
4567 return -1;
4568 else
4569 return uid;
4570 }
4571
4572 static inline int low2highgid(int gid)
4573 {
4574 if ((int16_t)gid == -1)
4575 return -1;
4576 else
4577 return gid;
4578 }
4579 static inline int tswapid(int id)
4580 {
4581 return tswap16(id);
4582 }
4583 #else /* !USE_UID16 */
4584 static inline int high2lowuid(int uid)
4585 {
4586 return uid;
4587 }
4588 static inline int high2lowgid(int gid)
4589 {
4590 return gid;
4591 }
4592 static inline int low2highuid(int uid)
4593 {
4594 return uid;
4595 }
4596 static inline int low2highgid(int gid)
4597 {
4598 return gid;
4599 }
4600 static inline int tswapid(int id)
4601 {
4602 return tswap32(id);
4603 }
4604 #endif /* USE_UID16 */
4605
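/* One-time initialization: register the structure conversion thunks,
   build the target-to-host errno table and patch each ioctl's size field
   with the size of the target's argument structure. */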
4606 void syscall_init(void)
4607 {
4608 IOCTLEntry *ie;
4609 const argtype *arg_type;
4610 int size;
4611 int i;
4612
4613 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4614 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4615 #include "syscall_types.h"
4616 #undef STRUCT
4617 #undef STRUCT_SPECIAL
4618
4619 /* Build target_to_host_errno_table[] table from
4620 * host_to_target_errno_table[]. */
4621 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4622 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4623 }
4624
4625 /* We patch the ioctl size if necessary. We rely on the fact that
4626 no ioctl has all bits set in the size field. */
4627 ie = ioctl_entries;
4628 while (ie->target_cmd != 0) {
4629 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4630 TARGET_IOC_SIZEMASK) {
4631 arg_type = ie->arg_type;
4632 if (arg_type[0] != TYPE_PTR) {
4633 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4634 ie->target_cmd);
4635 exit(1);
4636 }
4637 arg_type++;
4638 size = thunk_type_size(arg_type, 0);
4639 ie->target_cmd = (ie->target_cmd &
4640 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4641 (size << TARGET_IOC_SIZESHIFT);
4642 }
4643
4644 /* automatic consistency check if same arch */
4645 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4646 (defined(__x86_64__) && defined(TARGET_X86_64))
4647 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4648 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4649 ie->name, ie->target_cmd, ie->host_cmd);
4650 }
4651 #endif
4652 ie++;
4653 }
4654 }
4655
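/* On 32-bit ABIs a 64-bit file offset is passed in two registers; combine
   the halves in the target's word order.  On 64-bit ABIs the first word
   already holds the complete offset. */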
4656 #if TARGET_ABI_BITS == 32
4657 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4658 {
4659 #ifdef TARGET_WORDS_BIGENDIAN
4660 return ((uint64_t)word0 << 32) | word1;
4661 #else
4662 return ((uint64_t)word1 << 32) | word0;
4663 #endif
4664 }
4665 #else /* TARGET_ABI_BITS == 32 */
4666 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4667 {
4668 return word0;
4669 }
4670 #endif /* TARGET_ABI_BITS != 32 */
4671
4672 #ifdef TARGET_NR_truncate64
4673 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4674 abi_long arg2,
4675 abi_long arg3,
4676 abi_long arg4)
4677 {
4678 if (regpairs_aligned(cpu_env)) {
4679 arg2 = arg3;
4680 arg3 = arg4;
4681 }
4682 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4683 }
4684 #endif
4685
4686 #ifdef TARGET_NR_ftruncate64
4687 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4688 abi_long arg2,
4689 abi_long arg3,
4690 abi_long arg4)
4691 {
4692 if (regpairs_aligned(cpu_env)) {
4693 arg2 = arg3;
4694 arg3 = arg4;
4695 }
4696 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4697 }
4698 #endif
4699
4700 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4701 abi_ulong target_addr)
4702 {
4703 struct target_timespec *target_ts;
4704
4705 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4706 return -TARGET_EFAULT;
4707 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4708 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4709 unlock_user_struct(target_ts, target_addr, 0);
4710 return 0;
4711 }
4712
4713 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4714 struct timespec *host_ts)
4715 {
4716 struct target_timespec *target_ts;
4717
4718 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4719 return -TARGET_EFAULT;
4720 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4721 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4722 unlock_user_struct(target_ts, target_addr, 1);
4723 return 0;
4724 }
4725
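/* Copy a host struct stat into the guest's stat64 layout, with a special
   case for the old ARM EABI structure. */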
4726 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4727 static inline abi_long host_to_target_stat64(void *cpu_env,
4728 abi_ulong target_addr,
4729 struct stat *host_st)
4730 {
4731 #ifdef TARGET_ARM
4732 if (((CPUARMState *)cpu_env)->eabi) {
4733 struct target_eabi_stat64 *target_st;
4734
4735 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4736 return -TARGET_EFAULT;
4737 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4738 __put_user(host_st->st_dev, &target_st->st_dev);
4739 __put_user(host_st->st_ino, &target_st->st_ino);
4740 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4741 __put_user(host_st->st_ino, &target_st->__st_ino);
4742 #endif
4743 __put_user(host_st->st_mode, &target_st->st_mode);
4744 __put_user(host_st->st_nlink, &target_st->st_nlink);
4745 __put_user(host_st->st_uid, &target_st->st_uid);
4746 __put_user(host_st->st_gid, &target_st->st_gid);
4747 __put_user(host_st->st_rdev, &target_st->st_rdev);
4748 __put_user(host_st->st_size, &target_st->st_size);
4749 __put_user(host_st->st_blksize, &target_st->st_blksize);
4750 __put_user(host_st->st_blocks, &target_st->st_blocks);
4751 __put_user(host_st->st_atime, &target_st->target_st_atime);
4752 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4753 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4754 unlock_user_struct(target_st, target_addr, 1);
4755 } else
4756 #endif
4757 {
4758 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4759 struct target_stat *target_st;
4760 #else
4761 struct target_stat64 *target_st;
4762 #endif
4763
4764 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4765 return -TARGET_EFAULT;
4766 memset(target_st, 0, sizeof(*target_st));
4767 __put_user(host_st->st_dev, &target_st->st_dev);
4768 __put_user(host_st->st_ino, &target_st->st_ino);
4769 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4770 __put_user(host_st->st_ino, &target_st->__st_ino);
4771 #endif
4772 __put_user(host_st->st_mode, &target_st->st_mode);
4773 __put_user(host_st->st_nlink, &target_st->st_nlink);
4774 __put_user(host_st->st_uid, &target_st->st_uid);
4775 __put_user(host_st->st_gid, &target_st->st_gid);
4776 __put_user(host_st->st_rdev, &target_st->st_rdev);
4777 /* XXX: better use of kernel struct */
4778 __put_user(host_st->st_size, &target_st->st_size);
4779 __put_user(host_st->st_blksize, &target_st->st_blksize);
4780 __put_user(host_st->st_blocks, &target_st->st_blocks);
4781 __put_user(host_st->st_atime, &target_st->target_st_atime);
4782 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4783 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4784 unlock_user_struct(target_st, target_addr, 1);
4785 }
4786
4787 return 0;
4788 }
4789 #endif
4790
4791 #if defined(CONFIG_USE_NPTL)
4792 /* ??? Using host futex calls even when target atomic operations
4793 are not really atomic probably breaks things. However implementing
4794 futexes locally would make futexes shared between multiple processes
4795 tricky. Then again, they're probably useless because guest atomic
4796 operations won't work either. */
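/* Forward guest futex operations to the host futex syscall: convert the
   timeout for FUTEX_WAIT and byte-swap the values that the kernel compares
   against guest memory. */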
4797 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4798 target_ulong uaddr2, int val3)
4799 {
4800 struct timespec ts, *pts;
4801 int base_op;
4802
4803 /* ??? We assume FUTEX_* constants are the same on both host
4804 and target. */
4805 #ifdef FUTEX_CMD_MASK
4806 base_op = op & FUTEX_CMD_MASK;
4807 #else
4808 base_op = op;
4809 #endif
4810 switch (base_op) {
4811 case FUTEX_WAIT:
4812 if (timeout) {
4813 pts = &ts;
4814 target_to_host_timespec(pts, timeout);
4815 } else {
4816 pts = NULL;
4817 }
4818 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4819 pts, NULL, 0));
4820 case FUTEX_WAKE:
4821 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4822 case FUTEX_FD:
4823 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4824 case FUTEX_REQUEUE:
4825 case FUTEX_CMP_REQUEUE:
4826 case FUTEX_WAKE_OP:
4827 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4828 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4829 But the prototype takes a `struct timespec *'; insert casts
4830 to satisfy the compiler. We do not need to tswap TIMEOUT
4831 since it's not compared to guest memory. */
4832 pts = (struct timespec *)(uintptr_t) timeout;
4833 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4834 g2h(uaddr2),
4835 (base_op == FUTEX_CMP_REQUEUE
4836 ? tswap32(val3)
4837 : val3)));
4838 default:
4839 return -TARGET_ENOSYS;
4840 }
4841 }
4842 #endif
4843
4844 /* Map host to target signal numbers for the wait family of syscalls.
4845 Assume all other status bits are the same. */
4846 static int host_to_target_waitstatus(int status)
4847 {
4848 if (WIFSIGNALED(status)) {
4849 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4850 }
4851 if (WIFSTOPPED(status)) {
4852 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4853 | (status & 0xff);
4854 }
4855 return status;
4856 }
4857
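/* Return the host kernel version (or the qemu_uname_release override)
   packed one byte per component, e.g. "3.2.1" becomes 0x030201. */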
4858 int get_osversion(void)
4859 {
4860 static int osversion;
4861 struct new_utsname buf;
4862 const char *s;
4863 int i, n, tmp;
4864 if (osversion)
4865 return osversion;
4866 if (qemu_uname_release && *qemu_uname_release) {
4867 s = qemu_uname_release;
4868 } else {
4869 if (sys_uname(&buf))
4870 return 0;
4871 s = buf.release;
4872 }
4873 tmp = 0;
4874 for (i = 0; i < 3; i++) {
4875 n = 0;
4876 while (*s >= '0' && *s <= '9') {
4877 n *= 10;
4878 n += *s - '0';
4879 s++;
4880 }
4881 tmp = (tmp << 8) + n;
4882 if (*s == '.')
4883 s++;
4884 }
4885 osversion = tmp;
4886 return osversion;
4887 }
4888
4889
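/* Fake /proc/self/maps: read the host's maps, keep only the ranges that
   map to valid guest addresses and print them translated back into guest
   addresses; on some targets a synthetic [stack] line is appended. */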
4890 static int open_self_maps(void *cpu_env, int fd)
4891 {
4892 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4893 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4894 #endif
4895 FILE *fp;
4896 char *line = NULL;
4897 size_t len = 0;
4898 ssize_t read;
4899
4900 fp = fopen("/proc/self/maps", "r");
4901 if (fp == NULL) {
4902 return -EACCES;
4903 }
4904
4905 while ((read = getline(&line, &len, fp)) != -1) {
4906 int fields, dev_maj, dev_min, inode;
4907 uint64_t min, max, offset;
4908 char flag_r, flag_w, flag_x, flag_p;
4909 char path[512] = "";
4910 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4911 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4912 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4913
4914 if ((fields < 10) || (fields > 11)) {
4915 continue;
4916 }
4917 if (!strncmp(path, "[stack]", 7)) {
4918 continue;
4919 }
4920 if (h2g_valid(min) && h2g_valid(max)) {
4921 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
4922 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
4923 h2g(min), h2g(max), flag_r, flag_w,
4924 flag_x, flag_p, offset, dev_maj, dev_min, inode,
4925 path[0] ? " " : "", path);
4926 }
4927 }
4928
4929 free(line);
4930 fclose(fp);
4931
4932 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4933 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4934 (unsigned long long)ts->info->stack_limit,
4935 (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1))
4936 & TARGET_PAGE_MASK,
4937 (unsigned long long)0);
4938 #endif
4939
4940 return 0;
4941 }
4942
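/* Fake /proc/self/stat: emit the 44 space-separated fields, filling in
   only the pid, the command name and the stack start address; every other
   field is reported as 0. */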
4943 static int open_self_stat(void *cpu_env, int fd)
4944 {
4945 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4946 abi_ulong start_stack = ts->info->start_stack;
4947 int i;
4948
4949 for (i = 0; i < 44; i++) {
4950 char buf[128];
4951 int len;
4952 uint64_t val = 0;
4953
4954 if (i == 0) {
4955 /* pid */
4956 val = getpid();
4957 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4958 } else if (i == 1) {
4959 /* app name */
4960 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
4961 } else if (i == 27) {
4962 /* stack bottom */
4963 val = start_stack;
4964 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4965 } else {
4966 /* for the rest, there is MasterCard */
4967 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
4968 }
4969
4970 len = strlen(buf);
4971 if (write(fd, buf, len) != len) {
4972 return -1;
4973 }
4974 }
4975
4976 return 0;
4977 }
4978
4979 static int open_self_auxv(void *cpu_env, int fd)
4980 {
4981 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4982 abi_ulong auxv = ts->info->saved_auxv;
4983 abi_ulong len = ts->info->auxv_len;
4984 char *ptr;
4985
4986 /*
4987 * The auxiliary vector is stored on the target process's stack;
4988 * read the whole vector and copy it to the file.
4989 */
4990 ptr = lock_user(VERIFY_READ, auxv, len, 0);
4991 if (ptr != NULL) {
4992 while (len > 0) {
4993 ssize_t r;
4994 r = write(fd, ptr, len);
4995 if (r <= 0) {
4996 break;
4997 }
4998 len -= r;
4999 ptr += r;
5000 }
5001 lseek(fd, 0, SEEK_SET);
5002 unlock_user(ptr, auxv, len);
5003 }
5004
5005 return 0;
5006 }
5007
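/* open(2) wrapper: requests for the /proc/self files listed below are
   redirected to an unlinked temporary file filled in by the matching
   fill() handler; everything else goes straight to the host open(). */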
5008 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5009 {
5010 struct fake_open {
5011 const char *filename;
5012 int (*fill)(void *cpu_env, int fd);
5013 };
5014 const struct fake_open *fake_open;
5015 static const struct fake_open fakes[] = {
5016 { "/proc/self/maps", open_self_maps },
5017 { "/proc/self/stat", open_self_stat },
5018 { "/proc/self/auxv", open_self_auxv },
5019 { NULL, NULL }
5020 };
5021
5022 for (fake_open = fakes; fake_open->filename; fake_open++) {
5023 if (!strncmp(pathname, fake_open->filename,
5024 strlen(fake_open->filename))) {
5025 break;
5026 }
5027 }
5028
5029 if (fake_open->filename) {
5030 const char *tmpdir;
5031 char filename[PATH_MAX];
5032 int fd, r;
5033
5034 /* create a temporary file to hold the faked contents */
5035 tmpdir = getenv("TMPDIR");
5036 if (!tmpdir)
5037 tmpdir = "/tmp";
5038 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5039 fd = mkstemp(filename);
5040 if (fd < 0) {
5041 return fd;
5042 }
5043 unlink(filename);
5044
5045 if ((r = fake_open->fill(cpu_env, fd))) {
5046 close(fd);
5047 return r;
5048 }
5049 lseek(fd, 0, SEEK_SET);
5050
5051 return fd;
5052 }
5053
5054 return get_errno(open(path(pathname), flags, mode));
5055 }
5056
5057 /* do_syscall() should always have a single exit point at the end so
5058 that actions, such as logging of syscall results, can be performed.
5059 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5060 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5061 abi_long arg2, abi_long arg3, abi_long arg4,
5062 abi_long arg5, abi_long arg6, abi_long arg7,
5063 abi_long arg8)
5064 {
5065 abi_long ret;
5066 struct stat st;
5067 struct statfs stfs;
5068 void *p;
5069
5070 #ifdef DEBUG
5071 gemu_log("syscall %d", num);
5072 #endif
5073 if(do_strace)
5074 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5075
5076 switch(num) {
5077 case TARGET_NR_exit:
5078 #ifdef CONFIG_USE_NPTL
5079 /* In old applications this may be used to implement _exit(2).
5080 However, in threaded applications it is used for thread termination,
5081 and _exit_group is used for application termination.
5082 Do thread termination if we have more than one thread. */
5083 /* FIXME: This probably breaks if a signal arrives. We should probably
5084 be disabling signals. */
5085 if (first_cpu->next_cpu) {
5086 TaskState *ts;
5087 CPUArchState **lastp;
5088 CPUArchState *p;
5089
5090 cpu_list_lock();
5091 lastp = &first_cpu;
5092 p = first_cpu;
5093 while (p && p != (CPUArchState *)cpu_env) {
5094 lastp = &p->next_cpu;
5095 p = p->next_cpu;
5096 }
5097 /* If we didn't find the CPU for this thread then something is
5098 horribly wrong. */
5099 if (!p)
5100 abort();
5101 /* Remove the CPU from the list. */
5102 *lastp = p->next_cpu;
5103 cpu_list_unlock();
5104 ts = ((CPUArchState *)cpu_env)->opaque;
5105 if (ts->child_tidptr) {
5106 put_user_u32(0, ts->child_tidptr);
5107 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5108 NULL, NULL, 0);
5109 }
5110 thread_env = NULL;
5111 object_delete(OBJECT(ENV_GET_CPU(cpu_env)));
5112 g_free(ts);
5113 pthread_exit(NULL);
5114 }
5115 #endif
5116 #ifdef TARGET_GPROF
5117 _mcleanup();
5118 #endif
5119 gdb_exit(cpu_env, arg1);
5120 _exit(arg1);
5121 ret = 0; /* avoid warning */
5122 break;
5123 case TARGET_NR_read:
5124 if (arg3 == 0)
5125 ret = 0;
5126 else {
5127 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5128 goto efault;
5129 ret = get_errno(read(arg1, p, arg3));
5130 unlock_user(p, arg2, ret);
5131 }
5132 break;
5133 case TARGET_NR_write:
5134 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5135 goto efault;
5136 ret = get_errno(write(arg1, p, arg3));
5137 unlock_user(p, arg2, 0);
5138 break;
5139 case TARGET_NR_open:
5140 if (!(p = lock_user_string(arg1)))
5141 goto efault;
5142 ret = get_errno(do_open(cpu_env, p,
5143 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5144 arg3));
5145 unlock_user(p, arg1, 0);
5146 break;
5147 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5148 case TARGET_NR_openat:
5149 if (!(p = lock_user_string(arg2)))
5150 goto efault;
5151 ret = get_errno(sys_openat(arg1,
5152 path(p),
5153 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5154 arg4));
5155 unlock_user(p, arg2, 0);
5156 break;
5157 #endif
5158 case TARGET_NR_close:
5159 ret = get_errno(close(arg1));
5160 break;
5161 case TARGET_NR_brk:
5162 ret = do_brk(arg1);
5163 break;
5164 case TARGET_NR_fork:
5165 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5166 break;
5167 #ifdef TARGET_NR_waitpid
5168 case TARGET_NR_waitpid:
5169 {
5170 int status;
5171 ret = get_errno(waitpid(arg1, &status, arg3));
5172 if (!is_error(ret) && arg2 && ret
5173 && put_user_s32(host_to_target_waitstatus(status), arg2))
5174 goto efault;
5175 }
5176 break;
5177 #endif
5178 #ifdef TARGET_NR_waitid
5179 case TARGET_NR_waitid:
5180 {
5181 siginfo_t info;
5182 info.si_pid = 0;
5183 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5184 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5185 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5186 goto efault;
5187 host_to_target_siginfo(p, &info);
5188 unlock_user(p, arg3, sizeof(target_siginfo_t));
5189 }
5190 }
5191 break;
5192 #endif
5193 #ifdef TARGET_NR_creat /* not on alpha */
5194 case TARGET_NR_creat:
5195 if (!(p = lock_user_string(arg1)))
5196 goto efault;
5197 ret = get_errno(creat(p, arg2));
5198 unlock_user(p, arg1, 0);
5199 break;
5200 #endif
5201 case TARGET_NR_link:
5202 {
5203 void * p2;
5204 p = lock_user_string(arg1);
5205 p2 = lock_user_string(arg2);
5206 if (!p || !p2)
5207 ret = -TARGET_EFAULT;
5208 else
5209 ret = get_errno(link(p, p2));
5210 unlock_user(p2, arg2, 0);
5211 unlock_user(p, arg1, 0);
5212 }
5213 break;
5214 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5215 case TARGET_NR_linkat:
5216 {
5217 void * p2 = NULL;
5218 if (!arg2 || !arg4)
5219 goto efault;
5220 p = lock_user_string(arg2);
5221 p2 = lock_user_string(arg4);
5222 if (!p || !p2)
5223 ret = -TARGET_EFAULT;
5224 else
5225 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
5226 unlock_user(p, arg2, 0);
5227 unlock_user(p2, arg4, 0);
5228 }
5229 break;
5230 #endif
5231 case TARGET_NR_unlink:
5232 if (!(p = lock_user_string(arg1)))
5233 goto efault;
5234 ret = get_errno(unlink(p));
5235 unlock_user(p, arg1, 0);
5236 break;
5237 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5238 case TARGET_NR_unlinkat:
5239 if (!(p = lock_user_string(arg2)))
5240 goto efault;
5241 ret = get_errno(sys_unlinkat(arg1, p, arg3));
5242 unlock_user(p, arg2, 0);
5243 break;
5244 #endif
5245 case TARGET_NR_execve:
5246 {
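            /* Copy the guest argv/envp pointer arrays into host arrays of
               locked strings, enforce the kernel's argument size limit,
               call the host execve() and release the strings afterwards. */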
5247 char **argp, **envp;
5248 int argc, envc;
5249 abi_ulong gp;
5250 abi_ulong guest_argp;
5251 abi_ulong guest_envp;
5252 abi_ulong addr;
5253 char **q;
5254 int total_size = 0;
5255
5256 argc = 0;
5257 guest_argp = arg2;
5258 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5259 if (get_user_ual(addr, gp))
5260 goto efault;
5261 if (!addr)
5262 break;
5263 argc++;
5264 }
5265 envc = 0;
5266 guest_envp = arg3;
5267 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5268 if (get_user_ual(addr, gp))
5269 goto efault;
5270 if (!addr)
5271 break;
5272 envc++;
5273 }
5274
5275 argp = alloca((argc + 1) * sizeof(void *));
5276 envp = alloca((envc + 1) * sizeof(void *));
5277
5278 for (gp = guest_argp, q = argp; gp;
5279 gp += sizeof(abi_ulong), q++) {
5280 if (get_user_ual(addr, gp))
5281 goto execve_efault;
5282 if (!addr)
5283 break;
5284 if (!(*q = lock_user_string(addr)))
5285 goto execve_efault;
5286 total_size += strlen(*q) + 1;
5287 }
5288 *q = NULL;
5289
5290 for (gp = guest_envp, q = envp; gp;
5291 gp += sizeof(abi_ulong), q++) {
5292 if (get_user_ual(addr, gp))
5293 goto execve_efault;
5294 if (!addr)
5295 break;
5296 if (!(*q = lock_user_string(addr)))
5297 goto execve_efault;
5298 total_size += strlen(*q) + 1;
5299 }
5300 *q = NULL;
5301
5302 /* This case will not be caught by the host's execve() if its
5303 page size is bigger than the target's. */
5304 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5305 ret = -TARGET_E2BIG;
5306 goto execve_end;
5307 }
5308 if (!(p = lock_user_string(arg1)))
5309 goto execve_efault;
5310 ret = get_errno(execve(p, argp, envp));
5311 unlock_user(p, arg1, 0);
5312
5313 goto execve_end;
5314
5315 execve_efault:
5316 ret = -TARGET_EFAULT;
5317
5318 execve_end:
5319 for (gp = guest_argp, q = argp; *q;
5320 gp += sizeof(abi_ulong), q++) {
5321 if (get_user_ual(addr, gp)
5322 || !addr)
5323 break;
5324 unlock_user(*q, addr, 0);
5325 }
5326 for (gp = guest_envp, q = envp; *q;
5327 gp += sizeof(abi_ulong), q++) {
5328 if (get_user_ual(addr, gp)
5329 || !addr)
5330 break;
5331 unlock_user(*q, addr, 0);
5332 }
5333 }
5334 break;
5335 case TARGET_NR_chdir:
5336 if (!(p = lock_user_string(arg1)))
5337 goto efault;
5338 ret = get_errno(chdir(p));
5339 unlock_user(p, arg1, 0);
5340 break;
5341 #ifdef TARGET_NR_time
5342 case TARGET_NR_time:
5343 {
5344 time_t host_time;
5345 ret = get_errno(time(&host_time));
5346 if (!is_error(ret)
5347 && arg1
5348 && put_user_sal(host_time, arg1))
5349 goto efault;
5350 }
5351 break;
5352 #endif
5353 case TARGET_NR_mknod:
5354 if (!(p = lock_user_string(arg1)))
5355 goto efault;
5356 ret = get_errno(mknod(p, arg2, arg3));
5357 unlock_user(p, arg1, 0);
5358 break;
5359 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5360 case TARGET_NR_mknodat:
5361 if (!(p = lock_user_string(arg2)))
5362 goto efault;
5363 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5364 unlock_user(p, arg2, 0);
5365 break;
5366 #endif
5367 case TARGET_NR_chmod:
5368 if (!(p = lock_user_string(arg1)))
5369 goto efault;
5370 ret = get_errno(chmod(p, arg2));
5371 unlock_user(p, arg1, 0);
5372 break;
5373 #ifdef TARGET_NR_break
5374 case TARGET_NR_break:
5375 goto unimplemented;
5376 #endif
5377 #ifdef TARGET_NR_oldstat
5378 case TARGET_NR_oldstat:
5379 goto unimplemented;
5380 #endif
5381 case TARGET_NR_lseek:
5382 ret = get_errno(lseek(arg1, arg2, arg3));
5383 break;
5384 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5385 /* Alpha specific */
5386 case TARGET_NR_getxpid:
5387 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5388 ret = get_errno(getpid());
5389 break;
5390 #endif
5391 #ifdef TARGET_NR_getpid
5392 case TARGET_NR_getpid:
5393 ret = get_errno(getpid());
5394 break;
5395 #endif
5396 case TARGET_NR_mount:
5397 {
5398 /* need to look at the data field */
5399 void *p2, *p3;
5400 p = lock_user_string(arg1);
5401 p2 = lock_user_string(arg2);
5402 p3 = lock_user_string(arg3);
5403 if (!p || !p2 || !p3)
5404 ret = -TARGET_EFAULT;
5405 else {
5406 /* FIXME - arg5 should be locked, but it isn't clear how to
5407 * do that since it's not guaranteed to be a NULL-terminated
5408 * string.
5409 */
5410 if ( ! arg5 )
5411 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5412 else
5413 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5414 }
5415 unlock_user(p, arg1, 0);
5416 unlock_user(p2, arg2, 0);
5417 unlock_user(p3, arg3, 0);
5418 break;
5419 }
5420 #ifdef TARGET_NR_umount
5421 case TARGET_NR_umount:
5422 if (!(p = lock_user_string(arg1)))
5423 goto efault;
5424 ret = get_errno(umount(p));
5425 unlock_user(p, arg1, 0);
5426 break;
5427 #endif
5428 #ifdef TARGET_NR_stime /* not on alpha */
5429 case TARGET_NR_stime:
5430 {
5431 time_t host_time;
5432 if (get_user_sal(host_time, arg1))
5433 goto efault;
5434 ret = get_errno(stime(&host_time));
5435 }
5436 break;
5437 #endif
5438 case TARGET_NR_ptrace:
5439 goto unimplemented;
5440 #ifdef TARGET_NR_alarm /* not on alpha */
5441 case TARGET_NR_alarm:
5442 ret = alarm(arg1);
5443 break;
5444 #endif
5445 #ifdef TARGET_NR_oldfstat
5446 case TARGET_NR_oldfstat:
5447 goto unimplemented;
5448 #endif
5449 #ifdef TARGET_NR_pause /* not on alpha */
5450 case TARGET_NR_pause:
5451 ret = get_errno(pause());
5452 break;
5453 #endif
5454 #ifdef TARGET_NR_utime
5455 case TARGET_NR_utime:
5456 {
5457 struct utimbuf tbuf, *host_tbuf;
5458 struct target_utimbuf *target_tbuf;
5459 if (arg2) {
5460 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5461 goto efault;
5462 tbuf.actime = tswapal(target_tbuf->actime);
5463 tbuf.modtime = tswapal(target_tbuf->modtime);
5464 unlock_user_struct(target_tbuf, arg2, 0);
5465 host_tbuf = &tbuf;
5466 } else {
5467 host_tbuf = NULL;
5468 }
5469 if (!(p = lock_user_string(arg1)))
5470 goto efault;
5471 ret = get_errno(utime(p, host_tbuf));
5472 unlock_user(p, arg1, 0);
5473 }
5474 break;
5475 #endif
5476 case TARGET_NR_utimes:
5477 {
5478 struct timeval *tvp, tv[2];
5479 if (arg2) {
5480 if (copy_from_user_timeval(&tv[0], arg2)
5481 || copy_from_user_timeval(&tv[1],
5482 arg2 + sizeof(struct target_timeval)))
5483 goto efault;
5484 tvp = tv;
5485 } else {
5486 tvp = NULL;
5487 }
5488 if (!(p = lock_user_string(arg1)))
5489 goto efault;
5490 ret = get_errno(utimes(p, tvp));
5491 unlock_user(p, arg1, 0);
5492 }
5493 break;
5494 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5495 case TARGET_NR_futimesat:
5496 {
5497 struct timeval *tvp, tv[2];
5498 if (arg3) {
5499 if (copy_from_user_timeval(&tv[0], arg3)
5500 || copy_from_user_timeval(&tv[1],
5501 arg3 + sizeof(struct target_timeval)))
5502 goto efault;
5503 tvp = tv;
5504 } else {
5505 tvp = NULL;
5506 }
5507 if (!(p = lock_user_string(arg2)))
5508 goto efault;
5509 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5510 unlock_user(p, arg2, 0);
5511 }
5512 break;
5513 #endif
5514 #ifdef TARGET_NR_stty
5515 case TARGET_NR_stty:
5516 goto unimplemented;
5517 #endif
5518 #ifdef TARGET_NR_gtty
5519 case TARGET_NR_gtty:
5520 goto unimplemented;
5521 #endif
5522 case TARGET_NR_access:
5523 if (!(p = lock_user_string(arg1)))
5524 goto efault;
5525 ret = get_errno(access(path(p), arg2));
5526 unlock_user(p, arg1, 0);
5527 break;
5528 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5529 case TARGET_NR_faccessat:
5530 if (!(p = lock_user_string(arg2)))
5531 goto efault;
5532 ret = get_errno(sys_faccessat(arg1, p, arg3));
5533 unlock_user(p, arg2, 0);
5534 break;
5535 #endif
5536 #ifdef TARGET_NR_nice /* not on alpha */
5537 case TARGET_NR_nice:
5538 ret = get_errno(nice(arg1));
5539 break;
5540 #endif
5541 #ifdef TARGET_NR_ftime
5542 case TARGET_NR_ftime:
5543 goto unimplemented;
5544 #endif
5545 case TARGET_NR_sync:
5546 sync();
5547 ret = 0;
5548 break;
5549 case TARGET_NR_kill:
5550 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5551 break;
5552 case TARGET_NR_rename:
5553 {
5554 void *p2;
5555 p = lock_user_string(arg1);
5556 p2 = lock_user_string(arg2);
5557 if (!p || !p2)
5558 ret = -TARGET_EFAULT;
5559 else
5560 ret = get_errno(rename(p, p2));
5561 unlock_user(p2, arg2, 0);
5562 unlock_user(p, arg1, 0);
5563 }
5564 break;
5565 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5566 case TARGET_NR_renameat:
5567 {
5568 void *p2;
5569 p = lock_user_string(arg2);
5570 p2 = lock_user_string(arg4);
5571 if (!p || !p2)
5572 ret = -TARGET_EFAULT;
5573 else
5574 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5575 unlock_user(p2, arg4, 0);
5576 unlock_user(p, arg2, 0);
5577 }
5578 break;
5579 #endif
5580 case TARGET_NR_mkdir:
5581 if (!(p = lock_user_string(arg1)))
5582 goto efault;
5583 ret = get_errno(mkdir(p, arg2));
5584 unlock_user(p, arg1, 0);
5585 break;
5586 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5587 case TARGET_NR_mkdirat:
5588 if (!(p = lock_user_string(arg2)))
5589 goto efault;
5590 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5591 unlock_user(p, arg2, 0);
5592 break;
5593 #endif
5594 case TARGET_NR_rmdir:
5595 if (!(p = lock_user_string(arg1)))
5596 goto efault;
5597 ret = get_errno(rmdir(p));
5598 unlock_user(p, arg1, 0);
5599 break;
5600 case TARGET_NR_dup:
5601 ret = get_errno(dup(arg1));
5602 break;
5603 case TARGET_NR_pipe:
5604 ret = do_pipe(cpu_env, arg1, 0, 0);
5605 break;
5606 #ifdef TARGET_NR_pipe2
5607 case TARGET_NR_pipe2:
5608 ret = do_pipe(cpu_env, arg1,
5609 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5610 break;
5611 #endif
5612 case TARGET_NR_times:
5613 {
5614 struct target_tms *tmsp;
5615 struct tms tms;
5616 ret = get_errno(times(&tms));
5617 if (arg1) {
5618 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5619 if (!tmsp)
5620 goto efault;
5621 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5622 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5623 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5624 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5625 }
5626 if (!is_error(ret))
5627 ret = host_to_target_clock_t(ret);
5628 }
5629 break;
5630 #ifdef TARGET_NR_prof
5631 case TARGET_NR_prof:
5632 goto unimplemented;
5633 #endif
5634 #ifdef TARGET_NR_signal
5635 case TARGET_NR_signal:
5636 goto unimplemented;
5637 #endif
5638 case TARGET_NR_acct:
5639 if (arg1 == 0) {
5640 ret = get_errno(acct(NULL));
5641 } else {
5642 if (!(p = lock_user_string(arg1)))
5643 goto efault;
5644 ret = get_errno(acct(path(p)));
5645 unlock_user(p, arg1, 0);
5646 }
5647 break;
5648 #ifdef TARGET_NR_umount2 /* not on alpha */
5649 case TARGET_NR_umount2:
5650 if (!(p = lock_user_string(arg1)))
5651 goto efault;
5652 ret = get_errno(umount2(p, arg2));
5653 unlock_user(p, arg1, 0);
5654 break;
5655 #endif
5656 #ifdef TARGET_NR_lock
5657 case TARGET_NR_lock:
5658 goto unimplemented;
5659 #endif
5660 case TARGET_NR_ioctl:
5661 ret = do_ioctl(arg1, arg2, arg3);
5662 break;
5663 case TARGET_NR_fcntl:
5664 ret = do_fcntl(arg1, arg2, arg3);
5665 break;
5666 #ifdef TARGET_NR_mpx
5667 case TARGET_NR_mpx:
5668 goto unimplemented;
5669 #endif
5670 case TARGET_NR_setpgid:
5671 ret = get_errno(setpgid(arg1, arg2));
5672 break;
5673 #ifdef TARGET_NR_ulimit
5674 case TARGET_NR_ulimit:
5675 goto unimplemented;
5676 #endif
5677 #ifdef TARGET_NR_oldolduname
5678 case TARGET_NR_oldolduname:
5679 goto unimplemented;
5680 #endif
5681 case TARGET_NR_umask:
5682 ret = get_errno(umask(arg1));
5683 break;
5684 case TARGET_NR_chroot:
5685 if (!(p = lock_user_string(arg1)))
5686 goto efault;
5687 ret = get_errno(chroot(p));
5688 unlock_user(p, arg1, 0);
5689 break;
5690 case TARGET_NR_ustat:
5691 goto unimplemented;
5692 case TARGET_NR_dup2:
5693 ret = get_errno(dup2(arg1, arg2));
5694 break;
5695 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5696 case TARGET_NR_dup3:
5697 ret = get_errno(dup3(arg1, arg2, arg3));
5698 break;
5699 #endif
5700 #ifdef TARGET_NR_getppid /* not on alpha */
5701 case TARGET_NR_getppid:
5702 ret = get_errno(getppid());
5703 break;
5704 #endif
5705 case TARGET_NR_getpgrp:
5706 ret = get_errno(getpgrp());
5707 break;
5708 case TARGET_NR_setsid:
5709 ret = get_errno(setsid());
5710 break;
5711 #ifdef TARGET_NR_sigaction
5712 case TARGET_NR_sigaction:
5713 {
5714 #if defined(TARGET_ALPHA)
5715 struct target_sigaction act, oact, *pact = 0;
5716 struct target_old_sigaction *old_act;
5717 if (arg2) {
5718 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5719 goto efault;
5720 act._sa_handler = old_act->_sa_handler;
5721 target_siginitset(&act.sa_mask, old_act->sa_mask);
5722 act.sa_flags = old_act->sa_flags;
5723 act.sa_restorer = 0;
5724 unlock_user_struct(old_act, arg2, 0);
5725 pact = &act;
5726 }
5727 ret = get_errno(do_sigaction(arg1, pact, &oact));
5728 if (!is_error(ret) && arg3) {
5729 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5730 goto efault;
5731 old_act->_sa_handler = oact._sa_handler;
5732 old_act->sa_mask = oact.sa_mask.sig[0];
5733 old_act->sa_flags = oact.sa_flags;
5734 unlock_user_struct(old_act, arg3, 1);
5735 }
5736 #elif defined(TARGET_MIPS)
5737 struct target_sigaction act, oact, *pact, *old_act;
5738
5739 if (arg2) {
5740 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5741 goto efault;
5742 act._sa_handler = old_act->_sa_handler;
5743 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5744 act.sa_flags = old_act->sa_flags;
5745 unlock_user_struct(old_act, arg2, 0);
5746 pact = &act;
5747 } else {
5748 pact = NULL;
5749 }
5750
5751 ret = get_errno(do_sigaction(arg1, pact, &oact));
5752
5753 if (!is_error(ret) && arg3) {
5754 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5755 goto efault;
5756 old_act->_sa_handler = oact._sa_handler;
5757 old_act->sa_flags = oact.sa_flags;
5758 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5759 old_act->sa_mask.sig[1] = 0;
5760 old_act->sa_mask.sig[2] = 0;
5761 old_act->sa_mask.sig[3] = 0;
5762 unlock_user_struct(old_act, arg3, 1);
5763 }
5764 #else
5765 struct target_old_sigaction *old_act;
5766 struct target_sigaction act, oact, *pact;
5767 if (arg2) {
5768 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5769 goto efault;
5770 act._sa_handler = old_act->_sa_handler;
5771 target_siginitset(&act.sa_mask, old_act->sa_mask);
5772 act.sa_flags = old_act->sa_flags;
5773 act.sa_restorer = old_act->sa_restorer;
5774 unlock_user_struct(old_act, arg2, 0);
5775 pact = &act;
5776 } else {
5777 pact = NULL;
5778 }
5779 ret = get_errno(do_sigaction(arg1, pact, &oact));
5780 if (!is_error(ret) && arg3) {
5781 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5782 goto efault;
5783 old_act->_sa_handler = oact._sa_handler;
5784 old_act->sa_mask = oact.sa_mask.sig[0];
5785 old_act->sa_flags = oact.sa_flags;
5786 old_act->sa_restorer = oact.sa_restorer;
5787 unlock_user_struct(old_act, arg3, 1);
5788 }
5789 #endif
5790 }
5791 break;
5792 #endif
5793 case TARGET_NR_rt_sigaction:
5794 {
5795 #if defined(TARGET_ALPHA)
5796 struct target_sigaction act, oact, *pact = 0;
5797 struct target_rt_sigaction *rt_act;
5798 /* ??? arg4 == sizeof(sigset_t). */
5799 if (arg2) {
5800 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5801 goto efault;
5802 act._sa_handler = rt_act->_sa_handler;
5803 act.sa_mask = rt_act->sa_mask;
5804 act.sa_flags = rt_act->sa_flags;
5805 act.sa_restorer = arg5;
5806 unlock_user_struct(rt_act, arg2, 0);
5807 pact = &act;
5808 }
5809 ret = get_errno(do_sigaction(arg1, pact, &oact));
5810 if (!is_error(ret) && arg3) {
5811 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5812 goto efault;
5813 rt_act->_sa_handler = oact._sa_handler;
5814 rt_act->sa_mask = oact.sa_mask;
5815 rt_act->sa_flags = oact.sa_flags;
5816 unlock_user_struct(rt_act, arg3, 1);
5817 }
5818 #else
5819 struct target_sigaction *act;
5820 struct target_sigaction *oact;
5821
5822 if (arg2) {
5823 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5824 goto efault;
5825 } else
5826 act = NULL;
5827 if (arg3) {
5828 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5829 ret = -TARGET_EFAULT;
5830 goto rt_sigaction_fail;
5831 }
5832 } else
5833 oact = NULL;
5834 ret = get_errno(do_sigaction(arg1, act, oact));
5835 rt_sigaction_fail:
5836 if (act)
5837 unlock_user_struct(act, arg2, 0);
5838 if (oact)
5839 unlock_user_struct(oact, arg3, 1);
5840 #endif
5841 }
5842 break;
5843 #ifdef TARGET_NR_sgetmask /* not on alpha */
5844 case TARGET_NR_sgetmask:
5845 {
5846 sigset_t cur_set;
5847 abi_ulong target_set;
5848 sigprocmask(0, NULL, &cur_set);
5849 host_to_target_old_sigset(&target_set, &cur_set);
5850 ret = target_set;
5851 }
5852 break;
5853 #endif
5854 #ifdef TARGET_NR_ssetmask /* not on alpha */
5855 case TARGET_NR_ssetmask:
5856 {
5857 sigset_t set, oset, cur_set;
5858 abi_ulong target_set = arg1;
5859 sigprocmask(0, NULL, &cur_set);
5860 target_to_host_old_sigset(&set, &target_set);
5861 sigorset(&set, &set, &cur_set);
5862 sigprocmask(SIG_SETMASK, &set, &oset);
5863 host_to_target_old_sigset(&target_set, &oset);
5864 ret = target_set;
5865 }
5866 break;
5867 #endif
5868 #ifdef TARGET_NR_sigprocmask
5869 case TARGET_NR_sigprocmask:
5870 {
5871 #if defined(TARGET_ALPHA)
5872 sigset_t set, oldset;
5873 abi_ulong mask;
5874 int how;
5875
5876 switch (arg1) {
5877 case TARGET_SIG_BLOCK:
5878 how = SIG_BLOCK;
5879 break;
5880 case TARGET_SIG_UNBLOCK:
5881 how = SIG_UNBLOCK;
5882 break;
5883 case TARGET_SIG_SETMASK:
5884 how = SIG_SETMASK;
5885 break;
5886 default:
5887 ret = -TARGET_EINVAL;
5888 goto fail;
5889 }
5890 mask = arg2;
5891 target_to_host_old_sigset(&set, &mask);
5892
5893 ret = get_errno(sigprocmask(how, &set, &oldset));
5894 if (!is_error(ret)) {
5895 host_to_target_old_sigset(&mask, &oldset);
5896 ret = mask;
5897 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5898 }
5899 #else
5900 sigset_t set, oldset, *set_ptr;
5901 int how;
5902
5903 if (arg2) {
5904 switch (arg1) {
5905 case TARGET_SIG_BLOCK:
5906 how = SIG_BLOCK;
5907 break;
5908 case TARGET_SIG_UNBLOCK:
5909 how = SIG_UNBLOCK;
5910 break;
5911 case TARGET_SIG_SETMASK:
5912 how = SIG_SETMASK;
5913 break;
5914 default:
5915 ret = -TARGET_EINVAL;
5916 goto fail;
5917 }
5918 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5919 goto efault;
5920 target_to_host_old_sigset(&set, p);
5921 unlock_user(p, arg2, 0);
5922 set_ptr = &set;
5923 } else {
5924 how = 0;
5925 set_ptr = NULL;
5926 }
5927 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5928 if (!is_error(ret) && arg3) {
5929 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5930 goto efault;
5931 host_to_target_old_sigset(p, &oldset);
5932 unlock_user(p, arg3, sizeof(target_sigset_t));
5933 }
5934 #endif
5935 }
5936 break;
5937 #endif
5938 case TARGET_NR_rt_sigprocmask:
5939 {
5940 int how = arg1;
5941 sigset_t set, oldset, *set_ptr;
5942
5943 if (arg2) {
5944 switch(how) {
5945 case TARGET_SIG_BLOCK:
5946 how = SIG_BLOCK;
5947 break;
5948 case TARGET_SIG_UNBLOCK:
5949 how = SIG_UNBLOCK;
5950 break;
5951 case TARGET_SIG_SETMASK:
5952 how = SIG_SETMASK;
5953 break;
5954 default:
5955 ret = -TARGET_EINVAL;
5956 goto fail;
5957 }
5958 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5959 goto efault;
5960 target_to_host_sigset(&set, p);
5961 unlock_user(p, arg2, 0);
5962 set_ptr = &set;
5963 } else {
5964 how = 0;
5965 set_ptr = NULL;
5966 }
5967 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5968 if (!is_error(ret) && arg3) {
5969 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5970 goto efault;
5971 host_to_target_sigset(p, &oldset);
5972 unlock_user(p, arg3, sizeof(target_sigset_t));
5973 }
5974 }
5975 break;
5976 #ifdef TARGET_NR_sigpending
5977 case TARGET_NR_sigpending:
5978 {
5979 sigset_t set;
5980 ret = get_errno(sigpending(&set));
5981 if (!is_error(ret)) {
5982 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5983 goto efault;
5984 host_to_target_old_sigset(p, &set);
5985 unlock_user(p, arg1, sizeof(target_sigset_t));
5986 }
5987 }
5988 break;
5989 #endif
5990 case TARGET_NR_rt_sigpending:
5991 {
5992 sigset_t set;
5993 ret = get_errno(sigpending(&set));
5994 if (!is_error(ret)) {
5995 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5996 goto efault;
5997 host_to_target_sigset(p, &set);
5998 unlock_user(p, arg1, sizeof(target_sigset_t));
5999 }
6000 }
6001 break;
6002 #ifdef TARGET_NR_sigsuspend
6003 case TARGET_NR_sigsuspend:
6004 {
6005 sigset_t set;
6006 #if defined(TARGET_ALPHA)
6007 abi_ulong mask = arg1;
6008 target_to_host_old_sigset(&set, &mask);
6009 #else
6010 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6011 goto efault;
6012 target_to_host_old_sigset(&set, p);
6013 unlock_user(p, arg1, 0);
6014 #endif
6015 ret = get_errno(sigsuspend(&set));
6016 }
6017 break;
6018 #endif
6019 case TARGET_NR_rt_sigsuspend:
6020 {
6021 sigset_t set;
6022 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6023 goto efault;
6024 target_to_host_sigset(&set, p);
6025 unlock_user(p, arg1, 0);
6026 ret = get_errno(sigsuspend(&set));
6027 }
6028 break;
6029 case TARGET_NR_rt_sigtimedwait:
6030 {
6031 sigset_t set;
6032 struct timespec uts, *puts;
6033 siginfo_t uinfo;
6034
6035 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6036 goto efault;
6037 target_to_host_sigset(&set, p);
6038 unlock_user(p, arg1, 0);
6039 if (arg3) {
6040 puts = &uts;
6041 target_to_host_timespec(puts, arg3);
6042 } else {
6043 puts = NULL;
6044 }
6045 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6046 if (!is_error(ret) && arg2) {
6047 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6048 goto efault;
6049 host_to_target_siginfo(p, &uinfo);
6050 unlock_user(p, arg2, sizeof(target_siginfo_t));
6051 }
6052 }
6053 break;
6054 case TARGET_NR_rt_sigqueueinfo:
6055 {
6056 siginfo_t uinfo;
6057 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6058 goto efault;
6059 target_to_host_siginfo(&uinfo, p);
6060 unlock_user(p, arg3, 0);
6061 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6062 }
6063 break;
6064 #ifdef TARGET_NR_sigreturn
6065 case TARGET_NR_sigreturn:
6066 /* NOTE: ret is eax, so no transcoding needs to be done */
6067 ret = do_sigreturn(cpu_env);
6068 break;
6069 #endif
6070 case TARGET_NR_rt_sigreturn:
6071 /* NOTE: ret is eax, so no transcoding needs to be done */
6072 ret = do_rt_sigreturn(cpu_env);
6073 break;
6074 case TARGET_NR_sethostname:
6075 if (!(p = lock_user_string(arg1)))
6076 goto efault;
6077 ret = get_errno(sethostname(p, arg2));
6078 unlock_user(p, arg1, 0);
6079 break;
6080 case TARGET_NR_setrlimit:
6081 {
6082 int resource = target_to_host_resource(arg1);
6083 struct target_rlimit *target_rlim;
6084 struct rlimit rlim;
6085 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6086 goto efault;
6087 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6088 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6089 unlock_user_struct(target_rlim, arg2, 0);
6090 ret = get_errno(setrlimit(resource, &rlim));
6091 }
6092 break;
6093 case TARGET_NR_getrlimit:
6094 {
6095 int resource = target_to_host_resource(arg1);
6096 struct target_rlimit *target_rlim;
6097 struct rlimit rlim;
6098
6099 ret = get_errno(getrlimit(resource, &rlim));
6100 if (!is_error(ret)) {
6101 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6102 goto efault;
6103 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6104 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6105 unlock_user_struct(target_rlim, arg2, 1);
6106 }
6107 }
6108 break;
6109 case TARGET_NR_getrusage:
6110 {
6111 struct rusage rusage;
6112 ret = get_errno(getrusage(arg1, &rusage));
6113 if (!is_error(ret)) {
6114 host_to_target_rusage(arg2, &rusage);
6115 }
6116 }
6117 break;
6118 case TARGET_NR_gettimeofday:
6119 {
6120 struct timeval tv;
6121 ret = get_errno(gettimeofday(&tv, NULL));
6122 if (!is_error(ret)) {
6123 if (copy_to_user_timeval(arg1, &tv))
6124 goto efault;
6125 }
6126 }
6127 break;
6128 case TARGET_NR_settimeofday:
6129 {
6130 struct timeval tv;
6131 if (copy_from_user_timeval(&tv, arg1))
6132 goto efault;
6133 ret = get_errno(settimeofday(&tv, NULL));
6134 }
6135 break;
6136 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
6137 case TARGET_NR_select:
6138 {
6139 struct target_sel_arg_struct *sel;
6140 abi_ulong inp, outp, exp, tvp;
6141 long nsel;
6142
6143 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6144 goto efault;
6145 nsel = tswapal(sel->n);
6146 inp = tswapal(sel->inp);
6147 outp = tswapal(sel->outp);
6148 exp = tswapal(sel->exp);
6149 tvp = tswapal(sel->tvp);
6150 unlock_user_struct(sel, arg1, 0);
6151 ret = do_select(nsel, inp, outp, exp, tvp);
6152 }
6153 break;
6154 #endif
6155 #ifdef TARGET_NR_pselect6
6156 case TARGET_NR_pselect6:
6157 {
6158 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6159 fd_set rfds, wfds, efds;
6160 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6161 struct timespec ts, *ts_ptr;
6162
6163 /*
6164 * The 6th arg is actually two args smashed together,
6165 * so we cannot use the C library.
6166 */
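/* Concretely: arg6 points at two abi_ulongs in guest memory,
 * arg6[0] being the guest address of the sigset and arg6[1] its
 * size in bytes, mirroring the kernel's own pselect6 packing
 * (see the tswapal() reads of arg7[0]/arg7[1] below).
 */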
6167 sigset_t set;
6168 struct {
6169 sigset_t *set;
6170 size_t size;
6171 } sig, *sig_ptr;
6172
6173 abi_ulong arg_sigset, arg_sigsize, *arg7;
6174 target_sigset_t *target_sigset;
6175
6176 n = arg1;
6177 rfd_addr = arg2;
6178 wfd_addr = arg3;
6179 efd_addr = arg4;
6180 ts_addr = arg5;
6181
6182 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6183 if (ret) {
6184 goto fail;
6185 }
6186 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6187 if (ret) {
6188 goto fail;
6189 }
6190 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6191 if (ret) {
6192 goto fail;
6193 }
6194
6195 /*
6196 * This takes a timespec, and not a timeval, so we cannot
6197 * use the do_select() helper ...
6198 */
6199 if (ts_addr) {
6200 if (target_to_host_timespec(&ts, ts_addr)) {
6201 goto efault;
6202 }
6203 ts_ptr = &ts;
6204 } else {
6205 ts_ptr = NULL;
6206 }
6207
6208 /* Extract the two packed args for the sigset */
6209 if (arg6) {
6210 sig_ptr = &sig;
6211 sig.size = _NSIG / 8;
6212
6213 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6214 if (!arg7) {
6215 goto efault;
6216 }
6217 arg_sigset = tswapal(arg7[0]);
6218 arg_sigsize = tswapal(arg7[1]);
6219 unlock_user(arg7, arg6, 0);
6220
6221 if (arg_sigset) {
6222 sig.set = &set;
6223 if (arg_sigsize != sizeof(*target_sigset)) {
6224 /* Like the kernel, we enforce correct size sigsets */
6225 ret = -TARGET_EINVAL;
6226 goto fail;
6227 }
6228 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6229 sizeof(*target_sigset), 1);
6230 if (!target_sigset) {
6231 goto efault;
6232 }
6233 target_to_host_sigset(&set, target_sigset);
6234 unlock_user(target_sigset, arg_sigset, 0);
6235 } else {
6236 sig.set = NULL;
6237 }
6238 } else {
6239 sig_ptr = NULL;
6240 }
6241
6242 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6243 ts_ptr, sig_ptr));
6244
6245 if (!is_error(ret)) {
6246 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6247 goto efault;
6248 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6249 goto efault;
6250 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6251 goto efault;
6252
6253 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6254 goto efault;
6255 }
6256 }
6257 break;
6258 #endif
6259 case TARGET_NR_symlink:
6260 {
6261 void *p2;
6262 p = lock_user_string(arg1);
6263 p2 = lock_user_string(arg2);
6264 if (!p || !p2)
6265 ret = -TARGET_EFAULT;
6266 else
6267 ret = get_errno(symlink(p, p2));
6268 unlock_user(p2, arg2, 0);
6269 unlock_user(p, arg1, 0);
6270 }
6271 break;
6272 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6273 case TARGET_NR_symlinkat:
6274 {
6275 void *p2;
6276 p = lock_user_string(arg1);
6277 p2 = lock_user_string(arg3);
6278 if (!p || !p2)
6279 ret = -TARGET_EFAULT;
6280 else
6281 ret = get_errno(sys_symlinkat(p, arg2, p2));
6282 unlock_user(p2, arg3, 0);
6283 unlock_user(p, arg1, 0);
6284 }
6285 break;
6286 #endif
6287 #ifdef TARGET_NR_oldlstat
6288 case TARGET_NR_oldlstat:
6289 goto unimplemented;
6290 #endif
6291 case TARGET_NR_readlink:
6292 {
6293 void *p2, *temp;
6294 p = lock_user_string(arg1);
6295 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6296 if (!p || !p2)
6297 ret = -TARGET_EFAULT;
6298 else {
6299 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
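/* A plain readlink() here would report the path of the qemu
 * binary itself; substitute the resolved path of the emulated
 * executable (exec_path) instead. */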
6300 char real[PATH_MAX];
6301 temp = realpath(exec_path, real);
6302 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
6303 snprintf((char *)p2, arg3, "%s", real);
6304 }
6305 else
6306 ret = get_errno(readlink(path(p), p2, arg3));
6307 }
6308 unlock_user(p2, arg2, ret);
6309 unlock_user(p, arg1, 0);
6310 }
6311 break;
6312 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6313 case TARGET_NR_readlinkat:
6314 {
6315 void *p2;
6316 p = lock_user_string(arg2);
6317 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6318 if (!p || !p2)
6319 ret = -TARGET_EFAULT;
6320 else
6321 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6322 unlock_user(p2, arg3, ret);
6323 unlock_user(p, arg2, 0);
6324 }
6325 break;
6326 #endif
6327 #ifdef TARGET_NR_uselib
6328 case TARGET_NR_uselib:
6329 goto unimplemented;
6330 #endif
6331 #ifdef TARGET_NR_swapon
6332 case TARGET_NR_swapon:
6333 if (!(p = lock_user_string(arg1)))
6334 goto efault;
6335 ret = get_errno(swapon(p, arg2));
6336 unlock_user(p, arg1, 0);
6337 break;
6338 #endif
6339 case TARGET_NR_reboot:
6340 if (!(p = lock_user_string(arg4)))
6341 goto efault;
6342 ret = get_errno(reboot(arg1, arg2, arg3, p));
6343 unlock_user(p, arg4, 0);
6344 break;
6345 #ifdef TARGET_NR_readdir
6346 case TARGET_NR_readdir:
6347 goto unimplemented;
6348 #endif
6349 #ifdef TARGET_NR_mmap
6350 case TARGET_NR_mmap:
6351 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6352 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6353 || defined(TARGET_S390X)
6354 {
6355 abi_ulong *v;
6356 abi_ulong v1, v2, v3, v4, v5, v6;
6357 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6358 goto efault;
6359 v1 = tswapal(v[0]);
6360 v2 = tswapal(v[1]);
6361 v3 = tswapal(v[2]);
6362 v4 = tswapal(v[3]);
6363 v5 = tswapal(v[4]);
6364 v6 = tswapal(v[5]);
6365 unlock_user(v, arg1, 0);
6366 ret = get_errno(target_mmap(v1, v2, v3,
6367 target_to_host_bitmask(v4, mmap_flags_tbl),
6368 v5, v6));
6369 }
6370 #else
6371 ret = get_errno(target_mmap(arg1, arg2, arg3,
6372 target_to_host_bitmask(arg4, mmap_flags_tbl),
6373 arg5,
6374 arg6));
6375 #endif
6376 break;
6377 #endif
6378 #ifdef TARGET_NR_mmap2
6379 case TARGET_NR_mmap2:
6380 #ifndef MMAP_SHIFT
6381 #define MMAP_SHIFT 12
6382 #endif
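/* mmap2 passes the file offset in units of 1 << MMAP_SHIFT bytes
 * (4096 by default) rather than in bytes, so convert it back to a
 * byte offset for target_mmap(). */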
6383 ret = get_errno(target_mmap(arg1, arg2, arg3,
6384 target_to_host_bitmask(arg4, mmap_flags_tbl),
6385 arg5,
6386 arg6 << MMAP_SHIFT));
6387 break;
6388 #endif
6389 case TARGET_NR_munmap:
6390 ret = get_errno(target_munmap(arg1, arg2));
6391 break;
6392 case TARGET_NR_mprotect:
6393 {
6394 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6395 /* Special hack to detect libc making the stack executable. */
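/* glibc passes PROT_GROWSDOWN to change protection of the whole
 * stack below the given address, but the guest stack is just an
 * ordinary host mapping, so the flag cannot be forwarded as-is;
 * drop it and widen the range down to the guest stack limit
 * instead. */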
6396 if ((arg3 & PROT_GROWSDOWN)
6397 && arg1 >= ts->info->stack_limit
6398 && arg1 <= ts->info->start_stack) {
6399 arg3 &= ~PROT_GROWSDOWN;
6400 arg2 = arg2 + arg1 - ts->info->stack_limit;
6401 arg1 = ts->info->stack_limit;
6402 }
6403 }
6404 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6405 break;
6406 #ifdef TARGET_NR_mremap
6407 case TARGET_NR_mremap:
6408 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6409 break;
6410 #endif
6411 /* ??? msync/mlock/munlock are broken for softmmu. */
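/* g2h() translates a guest virtual address into the corresponding
 * host address, which is what the host msync/mlock/munlock calls
 * below operate on. */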
6412 #ifdef TARGET_NR_msync
6413 case TARGET_NR_msync:
6414 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6415 break;
6416 #endif
6417 #ifdef TARGET_NR_mlock
6418 case TARGET_NR_mlock:
6419 ret = get_errno(mlock(g2h(arg1), arg2));
6420 break;
6421 #endif
6422 #ifdef TARGET_NR_munlock
6423 case TARGET_NR_munlock:
6424 ret = get_errno(munlock(g2h(arg1), arg2));
6425 break;
6426 #endif
6427 #ifdef TARGET_NR_mlockall
6428 case TARGET_NR_mlockall:
6429 ret = get_errno(mlockall(arg1));
6430 break;
6431 #endif
6432 #ifdef TARGET_NR_munlockall
6433 case TARGET_NR_munlockall:
6434 ret = get_errno(munlockall());
6435 break;
6436 #endif
6437 case TARGET_NR_truncate:
6438 if (!(p = lock_user_string(arg1)))
6439 goto efault;
6440 ret = get_errno(truncate(p, arg2));
6441 unlock_user(p, arg1, 0);
6442 break;
6443 case TARGET_NR_ftruncate:
6444 ret = get_errno(ftruncate(arg1, arg2));
6445 break;
6446 case TARGET_NR_fchmod:
6447 ret = get_errno(fchmod(arg1, arg2));
6448 break;
6449 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6450 case TARGET_NR_fchmodat:
6451 if (!(p = lock_user_string(arg2)))
6452 goto efault;
6453 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6454 unlock_user(p, arg2, 0);
6455 break;
6456 #endif
6457 case TARGET_NR_getpriority:
6458 /* Note that negative values are valid for getpriority, so we must
6459 differentiate based on errno settings. */
6460 errno = 0;
6461 ret = getpriority(arg1, arg2);
6462 if (ret == -1 && errno != 0) {
6463 ret = -host_to_target_errno(errno);
6464 break;
6465 }
6466 #ifdef TARGET_ALPHA
6467 /* Return value is the unbiased priority. Signal no error. */
6468 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6469 #else
6470 /* Return value is a biased priority to avoid negative numbers. */
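/* i.e. mimic the raw getpriority() syscall, which returns
 * 20 - nice (range 1..40) so the result never collides with an
 * errno-style negative value; the guest libc undoes the bias. */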
6471 ret = 20 - ret;
6472 #endif
6473 break;
6474 case TARGET_NR_setpriority:
6475 ret = get_errno(setpriority(arg1, arg2, arg3));
6476 break;
6477 #ifdef TARGET_NR_profil
6478 case TARGET_NR_profil:
6479 goto unimplemented;
6480 #endif
6481 case TARGET_NR_statfs:
6482 if (!(p = lock_user_string(arg1)))
6483 goto efault;
6484 ret = get_errno(statfs(path(p), &stfs));
6485 unlock_user(p, arg1, 0);
6486 convert_statfs:
6487 if (!is_error(ret)) {
6488 struct target_statfs *target_stfs;
6489
6490 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6491 goto efault;
6492 __put_user(stfs.f_type, &target_stfs->f_type);
6493 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6494 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6495 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6496 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6497 __put_user(stfs.f_files, &target_stfs->f_files);
6498 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6499 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6500 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6501 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6502 unlock_user_struct(target_stfs, arg2, 1);
6503 }
6504 break;
6505 case TARGET_NR_fstatfs:
6506 ret = get_errno(fstatfs(arg1, &stfs));
6507 goto convert_statfs;
6508 #ifdef TARGET_NR_statfs64
6509 case TARGET_NR_statfs64:
6510 if (!(p = lock_user_string(arg1)))
6511 goto efault;
6512 ret = get_errno(statfs(path(p), &stfs));
6513 unlock_user(p, arg1, 0);
6514 convert_statfs64:
6515 if (!is_error(ret)) {
6516 struct target_statfs64 *target_stfs;
6517
6518 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6519 goto efault;
6520 __put_user(stfs.f_type, &target_stfs->f_type);
6521 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6522 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6523 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6524 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6525 __put_user(stfs.f_files, &target_stfs->f_files);
6526 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6527 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6528 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6529 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6530 unlock_user_struct(target_stfs, arg3, 1);
6531 }
6532 break;
6533 case TARGET_NR_fstatfs64:
6534 ret = get_errno(fstatfs(arg1, &stfs));
6535 goto convert_statfs64;
6536 #endif
6537 #ifdef TARGET_NR_ioperm
6538 case TARGET_NR_ioperm:
6539 goto unimplemented;
6540 #endif
6541 #ifdef TARGET_NR_socketcall
6542 case TARGET_NR_socketcall:
6543 ret = do_socketcall(arg1, arg2);
6544 break;
6545 #endif
6546 #ifdef TARGET_NR_accept
6547 case TARGET_NR_accept:
6548 ret = do_accept(arg1, arg2, arg3);
6549 break;
6550 #endif
6551 #ifdef TARGET_NR_bind
6552 case TARGET_NR_bind:
6553 ret = do_bind(arg1, arg2, arg3);
6554 break;
6555 #endif
6556 #ifdef TARGET_NR_connect
6557 case TARGET_NR_connect:
6558 ret = do_connect(arg1, arg2, arg3);
6559 break;
6560 #endif
6561 #ifdef TARGET_NR_getpeername
6562 case TARGET_NR_getpeername:
6563 ret = do_getpeername(arg1, arg2, arg3);
6564 break;
6565 #endif
6566 #ifdef TARGET_NR_getsockname
6567 case TARGET_NR_getsockname:
6568 ret = do_getsockname(arg1, arg2, arg3);
6569 break;
6570 #endif
6571 #ifdef TARGET_NR_getsockopt
6572 case TARGET_NR_getsockopt:
6573 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6574 break;
6575 #endif
6576 #ifdef TARGET_NR_listen
6577 case TARGET_NR_listen:
6578 ret = get_errno(listen(arg1, arg2));
6579 break;
6580 #endif
6581 #ifdef TARGET_NR_recv
6582 case TARGET_NR_recv:
6583 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6584 break;
6585 #endif
6586 #ifdef TARGET_NR_recvfrom
6587 case TARGET_NR_recvfrom:
6588 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6589 break;
6590 #endif
6591 #ifdef TARGET_NR_recvmsg
6592 case TARGET_NR_recvmsg:
6593 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6594 break;
6595 #endif
6596 #ifdef TARGET_NR_send
6597 case TARGET_NR_send:
6598 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6599 break;
6600 #endif
6601 #ifdef TARGET_NR_sendmsg
6602 case TARGET_NR_sendmsg:
6603 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6604 break;
6605 #endif
6606 #ifdef TARGET_NR_sendto
6607 case TARGET_NR_sendto:
6608 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6609 break;
6610 #endif
6611 #ifdef TARGET_NR_shutdown
6612 case TARGET_NR_shutdown:
6613 ret = get_errno(shutdown(arg1, arg2));
6614 break;
6615 #endif
6616 #ifdef TARGET_NR_socket
6617 case TARGET_NR_socket:
6618 ret = do_socket(arg1, arg2, arg3);
6619 break;
6620 #endif
6621 #ifdef TARGET_NR_socketpair
6622 case TARGET_NR_socketpair:
6623 ret = do_socketpair(arg1, arg2, arg3, arg4);
6624 break;
6625 #endif
6626 #ifdef TARGET_NR_setsockopt
6627 case TARGET_NR_setsockopt:
6628 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6629 break;
6630 #endif
6631
6632 case TARGET_NR_syslog:
6633 if (!(p = lock_user_string(arg2)))
6634 goto efault;
6635 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6636 unlock_user(p, arg2, 0);
6637 break;
6638
6639 case TARGET_NR_setitimer:
6640 {
6641 struct itimerval value, ovalue, *pvalue;
6642
6643 if (arg2) {
6644 pvalue = &value;
6645 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6646 || copy_from_user_timeval(&pvalue->it_value,
6647 arg2 + sizeof(struct target_timeval)))
6648 goto efault;
6649 } else {
6650 pvalue = NULL;
6651 }
6652 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6653 if (!is_error(ret) && arg3) {
6654 if (copy_to_user_timeval(arg3,
6655 &ovalue.it_interval)
6656 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6657 &ovalue.it_value))
6658 goto efault;
6659 }
6660 }
6661 break;
6662 case TARGET_NR_getitimer:
6663 {
6664 struct itimerval value;
6665
6666 ret = get_errno(getitimer(arg1, &value));
6667 if (!is_error(ret) && arg2) {
6668 if (copy_to_user_timeval(arg2,
6669 &value.it_interval)
6670 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6671 &value.it_value))
6672 goto efault;
6673 }
6674 }
6675 break;
6676 case TARGET_NR_stat:
6677 if (!(p = lock_user_string(arg1)))
6678 goto efault;
6679 ret = get_errno(stat(path(p), &st));
6680 unlock_user(p, arg1, 0);
6681 goto do_stat;
6682 case TARGET_NR_lstat:
6683 if (!(p = lock_user_string(arg1)))
6684 goto efault;
6685 ret = get_errno(lstat(path(p), &st));
6686 unlock_user(p, arg1, 0);
6687 goto do_stat;
6688 case TARGET_NR_fstat:
6689 {
6690 ret = get_errno(fstat(arg1, &st));
6691 do_stat:
6692 if (!is_error(ret)) {
6693 struct target_stat *target_st;
6694
6695 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6696 goto efault;
6697 memset(target_st, 0, sizeof(*target_st));
6698 __put_user(st.st_dev, &target_st->st_dev);
6699 __put_user(st.st_ino, &target_st->st_ino);
6700 __put_user(st.st_mode, &target_st->st_mode);
6701 __put_user(st.st_uid, &target_st->st_uid);
6702 __put_user(st.st_gid, &target_st->st_gid);
6703 __put_user(st.st_nlink, &target_st->st_nlink);
6704 __put_user(st.st_rdev, &target_st->st_rdev);
6705 __put_user(st.st_size, &target_st->st_size);
6706 __put_user(st.st_blksize, &target_st->st_blksize);
6707 __put_user(st.st_blocks, &target_st->st_blocks);
6708 __put_user(st.st_atime, &target_st->target_st_atime);
6709 __put_user(st.st_mtime, &target_st->target_st_mtime);
6710 __put_user(st.st_ctime, &target_st->target_st_ctime);
6711 unlock_user_struct(target_st, arg2, 1);
6712 }
6713 }
6714 break;
6715 #ifdef TARGET_NR_olduname
6716 case TARGET_NR_olduname:
6717 goto unimplemented;
6718 #endif
6719 #ifdef TARGET_NR_iopl
6720 case TARGET_NR_iopl:
6721 goto unimplemented;
6722 #endif
6723 case TARGET_NR_vhangup:
6724 ret = get_errno(vhangup());
6725 break;
6726 #ifdef TARGET_NR_idle
6727 case TARGET_NR_idle:
6728 goto unimplemented;
6729 #endif
6730 #ifdef TARGET_NR_syscall
6731 case TARGET_NR_syscall:
6732 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6733 arg6, arg7, arg8, 0);
6734 break;
6735 #endif
6736 case TARGET_NR_wait4:
6737 {
6738 int status;
6739 abi_long status_ptr = arg2;
6740 struct rusage rusage, *rusage_ptr;
6741 abi_ulong target_rusage = arg4;
6742 if (target_rusage)
6743 rusage_ptr = &rusage;
6744 else
6745 rusage_ptr = NULL;
6746 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6747 if (!is_error(ret)) {
6748 if (status_ptr && ret) {
6749 status = host_to_target_waitstatus(status);
6750 if (put_user_s32(status, status_ptr))
6751 goto efault;
6752 }
6753 if (target_rusage)
6754 host_to_target_rusage(target_rusage, &rusage);
6755 }
6756 }
6757 break;
6758 #ifdef TARGET_NR_swapoff
6759 case TARGET_NR_swapoff:
6760 if (!(p = lock_user_string(arg1)))
6761 goto efault;
6762 ret = get_errno(swapoff(p));
6763 unlock_user(p, arg1, 0);
6764 break;
6765 #endif
6766 case TARGET_NR_sysinfo:
6767 {
6768 struct target_sysinfo *target_value;
6769 struct sysinfo value;
6770 ret = get_errno(sysinfo(&value));
6771 if (!is_error(ret) && arg1)
6772 {
6773 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6774 goto efault;
6775 __put_user(value.uptime, &target_value->uptime);
6776 __put_user(value.loads[0], &target_value->loads[0]);
6777 __put_user(value.loads[1], &target_value->loads[1]);
6778 __put_user(value.loads[2], &target_value->loads[2]);
6779 __put_user(value.totalram, &target_value->totalram);
6780 __put_user(value.freeram, &target_value->freeram);
6781 __put_user(value.sharedram, &target_value->sharedram);
6782 __put_user(value.bufferram, &target_value->bufferram);
6783 __put_user(value.totalswap, &target_value->totalswap);
6784 __put_user(value.freeswap, &target_value->freeswap);
6785 __put_user(value.procs, &target_value->procs);
6786 __put_user(value.totalhigh, &target_value->totalhigh);
6787 __put_user(value.freehigh, &target_value->freehigh);
6788 __put_user(value.mem_unit, &target_value->mem_unit);
6789 unlock_user_struct(target_value, arg1, 1);
6790 }
6791 }
6792 break;
6793 #ifdef TARGET_NR_ipc
6794 case TARGET_NR_ipc:
6795 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6796 break;
6797 #endif
6798 #ifdef TARGET_NR_semget
6799 case TARGET_NR_semget:
6800 ret = get_errno(semget(arg1, arg2, arg3));
6801 break;
6802 #endif
6803 #ifdef TARGET_NR_semop
6804 case TARGET_NR_semop:
6805 ret = get_errno(do_semop(arg1, arg2, arg3));
6806 break;
6807 #endif
6808 #ifdef TARGET_NR_semctl
6809 case TARGET_NR_semctl:
6810 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6811 break;
6812 #endif
6813 #ifdef TARGET_NR_msgctl
6814 case TARGET_NR_msgctl:
6815 ret = do_msgctl(arg1, arg2, arg3);
6816 break;
6817 #endif
6818 #ifdef TARGET_NR_msgget
6819 case TARGET_NR_msgget:
6820 ret = get_errno(msgget(arg1, arg2));
6821 break;
6822 #endif
6823 #ifdef TARGET_NR_msgrcv
6824 case TARGET_NR_msgrcv:
6825 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6826 break;
6827 #endif
6828 #ifdef TARGET_NR_msgsnd
6829 case TARGET_NR_msgsnd:
6830 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6831 break;
6832 #endif
6833 #ifdef TARGET_NR_shmget
6834 case TARGET_NR_shmget:
6835 ret = get_errno(shmget(arg1, arg2, arg3));
6836 break;
6837 #endif
6838 #ifdef TARGET_NR_shmctl
6839 case TARGET_NR_shmctl:
6840 ret = do_shmctl(arg1, arg2, arg3);
6841 break;
6842 #endif
6843 #ifdef TARGET_NR_shmat
6844 case TARGET_NR_shmat:
6845 ret = do_shmat(arg1, arg2, arg3);
6846 break;
6847 #endif
6848 #ifdef TARGET_NR_shmdt
6849 case TARGET_NR_shmdt:
6850 ret = do_shmdt(arg1);
6851 break;
6852 #endif
6853 case TARGET_NR_fsync:
6854 ret = get_errno(fsync(arg1));
6855 break;
6856 case TARGET_NR_clone:
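/* The guest ABIs disagree on the ordering of clone()'s register
 * arguments (flags/newsp and the tid/tls pointers), so shuffle
 * them into do_fork()'s expected order per target. */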
6857 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6858 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6859 #elif defined(TARGET_CRIS)
6860 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6861 #elif defined(TARGET_S390X)
6862 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6863 #else
6864 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6865 #endif
6866 break;
6867 #ifdef __NR_exit_group
6868 /* new thread calls */
6869 case TARGET_NR_exit_group:
6870 #ifdef TARGET_GPROF
6871 _mcleanup();
6872 #endif
6873 gdb_exit(cpu_env, arg1);
6874 ret = get_errno(exit_group(arg1));
6875 break;
6876 #endif
6877 case TARGET_NR_setdomainname:
6878 if (!(p = lock_user_string(arg1)))
6879 goto efault;
6880 ret = get_errno(setdomainname(p, arg2));
6881 unlock_user(p, arg1, 0);
6882 break;
6883 case TARGET_NR_uname:
6884 /* no need to transcode because we use the linux syscall */
6885 {
6886 struct new_utsname * buf;
6887
6888 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6889 goto efault;
6890 ret = get_errno(sys_uname(buf));
6891 if (!is_error(ret)) {
6892 /* Overwrite the native machine name with whatever is being
6893 emulated. */
6894 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6895 /* Allow the user to override the reported release. */
6896 if (qemu_uname_release && *qemu_uname_release)
6897 strcpy (buf->release, qemu_uname_release);
6898 }
6899 unlock_user_struct(buf, arg1, 1);
6900 }
6901 break;
6902 #ifdef TARGET_I386
6903 case TARGET_NR_modify_ldt:
6904 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6905 break;
6906 #if !defined(TARGET_X86_64)
6907 case TARGET_NR_vm86old:
6908 goto unimplemented;
6909 case TARGET_NR_vm86:
6910 ret = do_vm86(cpu_env, arg1, arg2);
6911 break;
6912 #endif
6913 #endif
6914 case TARGET_NR_adjtimex:
6915 goto unimplemented;
6916 #ifdef TARGET_NR_create_module
6917 case TARGET_NR_create_module:
6918 #endif
6919 case TARGET_NR_init_module:
6920 case TARGET_NR_delete_module:
6921 #ifdef TARGET_NR_get_kernel_syms
6922 case TARGET_NR_get_kernel_syms:
6923 #endif
6924 goto unimplemented;
6925 case TARGET_NR_quotactl:
6926 goto unimplemented;
6927 case TARGET_NR_getpgid:
6928 ret = get_errno(getpgid(arg1));
6929 break;
6930 case TARGET_NR_fchdir:
6931 ret = get_errno(fchdir(arg1));
6932 break;
6933 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6934 case TARGET_NR_bdflush:
6935 goto unimplemented;
6936 #endif
6937 #ifdef TARGET_NR_sysfs
6938 case TARGET_NR_sysfs:
6939 goto unimplemented;
6940 #endif
6941 case TARGET_NR_personality:
6942 ret = get_errno(personality(arg1));
6943 break;
6944 #ifdef TARGET_NR_afs_syscall
6945 case TARGET_NR_afs_syscall:
6946 goto unimplemented;
6947 #endif
6948 #ifdef TARGET_NR__llseek /* Not on alpha */
6949 case TARGET_NR__llseek:
6950 {
6951 int64_t res;
6952 #if !defined(__NR_llseek)
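/* No host llseek: arg2 and arg3 are the high and low 32-bit
 * halves of the 64-bit offset, so recombine them and use lseek
 * directly. */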
6953 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6954 if (res == -1) {
6955 ret = get_errno(res);
6956 } else {
6957 ret = 0;
6958 }
6959 #else
6960 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6961 #endif
6962 if ((ret == 0) && put_user_s64(res, arg4)) {
6963 goto efault;
6964 }
6965 }
6966 break;
6967 #endif
6968 case TARGET_NR_getdents:
6969 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6970 {
6971 struct target_dirent *target_dirp;
6972 struct linux_dirent *dirp;
6973 abi_long count = arg3;
6974
6975 dirp = malloc(count);
6976 if (!dirp) {
6977 ret = -TARGET_ENOMEM;
6978 goto fail;
6979 }
6980
6981 ret = get_errno(sys_getdents(arg1, dirp, count));
6982 if (!is_error(ret)) {
6983 struct linux_dirent *de;
6984 struct target_dirent *tde;
6985 int len = ret;
6986 int reclen, treclen;
6987 int count1, tnamelen;
6988
6989 count1 = 0;
6990 de = dirp;
6991 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6992 goto efault;
6993 tde = target_dirp;
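/* The host linux_dirent stores d_ino and d_off as longs while the
 * target uses abi_long, so each converted record shrinks by
 * 2 * (sizeof(long) - sizeof(abi_long)) bytes. */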
6994 while (len > 0) {
6995 reclen = de->d_reclen;
6996 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6997 tde->d_reclen = tswap16(treclen);
6998 tde->d_ino = tswapal(de->d_ino);
6999 tde->d_off = tswapal(de->d_off);
7000 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
7001 if (tnamelen > 256)
7002 tnamelen = 256;
7003 /* XXX: may not be correct */
7004 pstrcpy(tde->d_name, tnamelen, de->d_name);
7005 de = (struct linux_dirent *)((char *)de + reclen);
7006 len -= reclen;
7007 tde = (struct target_dirent *)((char *)tde + treclen);
7008 count1 += treclen;
7009 }
7010 ret = count1;
7011 unlock_user(target_dirp, arg2, ret);
7012 }
7013 free(dirp);
7014 }
7015 #else
7016 {
7017 struct linux_dirent *dirp;
7018 abi_long count = arg3;
7019
7020 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7021 goto efault;
7022 ret = get_errno(sys_getdents(arg1, dirp, count));
7023 if (!is_error(ret)) {
7024 struct linux_dirent *de;
7025 int len = ret;
7026 int reclen;
7027 de = dirp;
7028 while (len > 0) {
7029 reclen = de->d_reclen;
7030 if (reclen > len)
7031 break;
7032 de->d_reclen = tswap16(reclen);
7033 tswapls(&de->d_ino);
7034 tswapls(&de->d_off);
7035 de = (struct linux_dirent *)((char *)de + reclen);
7036 len -= reclen;
7037 }
7038 }
7039 unlock_user(dirp, arg2, ret);
7040 }
7041 #endif
7042 break;
7043 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7044 case TARGET_NR_getdents64:
7045 {
7046 struct linux_dirent64 *dirp;
7047 abi_long count = arg3;
7048 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7049 goto efault;
7050 ret = get_errno(sys_getdents64(arg1, dirp, count));
7051 if (!is_error(ret)) {
7052 struct linux_dirent64 *de;
7053 int len = ret;
7054 int reclen;
7055 de = dirp;
7056 while (len > 0) {
7057 reclen = de->d_reclen;
7058 if (reclen > len)
7059 break;
7060 de->d_reclen = tswap16(reclen);
7061 tswap64s((uint64_t *)&de->d_ino);
7062 tswap64s((uint64_t *)&de->d_off);
7063 de = (struct linux_dirent64 *)((char *)de + reclen);
7064 len -= reclen;
7065 }
7066 }
7067 unlock_user(dirp, arg2, ret);
7068 }
7069 break;
7070 #endif /* TARGET_NR_getdents64 */
7071 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
7072 #ifdef TARGET_S390X
7073 case TARGET_NR_select:
7074 #else
7075 case TARGET_NR__newselect:
7076 #endif
7077 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7078 break;
7079 #endif
7080 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7081 # ifdef TARGET_NR_poll
7082 case TARGET_NR_poll:
7083 # endif
7084 # ifdef TARGET_NR_ppoll
7085 case TARGET_NR_ppoll:
7086 # endif
7087 {
7088 struct target_pollfd *target_pfd;
7089 unsigned int nfds = arg2;
7090 int timeout = arg3;
7091 struct pollfd *pfd;
7092 unsigned int i;
7093
7094 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7095 if (!target_pfd)
7096 goto efault;
7097
7098 pfd = alloca(sizeof(struct pollfd) * nfds);
7099 for(i = 0; i < nfds; i++) {
7100 pfd[i].fd = tswap32(target_pfd[i].fd);
7101 pfd[i].events = tswap16(target_pfd[i].events);
7102 }
7103
7104 # ifdef TARGET_NR_ppoll
7105 if (num == TARGET_NR_ppoll) {
7106 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7107 target_sigset_t *target_set;
7108 sigset_t _set, *set = &_set;
7109
7110 if (arg3) {
7111 if (target_to_host_timespec(timeout_ts, arg3)) {
7112 unlock_user(target_pfd, arg1, 0);
7113 goto efault;
7114 }
7115 } else {
7116 timeout_ts = NULL;
7117 }
7118
7119 if (arg4) {
7120 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7121 if (!target_set) {
7122 unlock_user(target_pfd, arg1, 0);
7123 goto efault;
7124 }
7125 target_to_host_sigset(set, target_set);
7126 } else {
7127 set = NULL;
7128 }
7129
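/* The final argument is the sigset size in bytes; the raw ppoll
 * syscall expects the kernel's _NSIG / 8, not the (larger) glibc
 * sizeof(sigset_t). */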
7130 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7131
7132 if (!is_error(ret) && arg3) {
7133 host_to_target_timespec(arg3, timeout_ts);
7134 }
7135 if (arg4) {
7136 unlock_user(target_set, arg4, 0);
7137 }
7138 } else
7139 # endif
7140 ret = get_errno(poll(pfd, nfds, timeout));
7141
7142 if (!is_error(ret)) {
7143 for(i = 0; i < nfds; i++) {
7144 target_pfd[i].revents = tswap16(pfd[i].revents);
7145 }
7146 }
7147 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7148 }
7149 break;
7150 #endif
7151 case TARGET_NR_flock:
7152 /* NOTE: the flock constant seems to be the same for every
7153 Linux platform */
7154 ret = get_errno(flock(arg1, arg2));
7155 break;
7156 case TARGET_NR_readv:
7157 {
7158 int count = arg3;
7159 struct iovec *vec;
7160
7161 vec = alloca(count * sizeof(struct iovec));
7162 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
7163 goto efault;
7164 ret = get_errno(readv(arg1, vec, count));
7165 unlock_iovec(vec, arg2, count, 1);
7166 }
7167 break;
7168 case TARGET_NR_writev:
7169 {
7170 int count = arg3;
7171 struct iovec *vec;
7172
7173 vec = alloca(count * sizeof(struct iovec));
7174 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7175 goto efault;
7176 ret = get_errno(writev(arg1, vec, count));
7177 unlock_iovec(vec, arg2, count, 0);
7178 }
7179 break;
7180 case TARGET_NR_getsid:
7181 ret = get_errno(getsid(arg1));
7182 break;
7183 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7184 case TARGET_NR_fdatasync:
7185 ret = get_errno(fdatasync(arg1));
7186 break;
7187 #endif
7188 case TARGET_NR__sysctl:
7189 /* We don't implement this, but ENOTDIR is always a safe
7190 return value. */
7191 ret = -TARGET_ENOTDIR;
7192 break;
7193 case TARGET_NR_sched_getaffinity:
7194 {
7195 unsigned int mask_size;
7196 unsigned long *mask;
7197
7198 /*
7199 * sched_getaffinity needs multiples of ulong, so need to take
7200 * care of mismatches between target ulong and host ulong sizes.
7201 */
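/* Reject lengths that are not a multiple of the target ulong,
 * then round up to whole host ulongs for the host syscall; e.g. a
 * 4-byte request from a 32-bit guest becomes 8 bytes on a 64-bit
 * host. */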
7202 if (arg2 & (sizeof(abi_ulong) - 1)) {
7203 ret = -TARGET_EINVAL;
7204 break;
7205 }
7206 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7207
7208 mask = alloca(mask_size);
7209 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7210
7211 if (!is_error(ret)) {
7212 if (copy_to_user(arg3, mask, ret)) {
7213 goto efault;
7214 }
7215 }
7216 }
7217 break;
7218 case TARGET_NR_sched_setaffinity:
7219 {
7220 unsigned int mask_size;
7221 unsigned long *mask;
7222
7223 /*
7224 * sched_setaffinity needs multiples of ulong, so need to take
7225 * care of mismatches between target ulong and host ulong sizes.
7226 */
7227 if (arg2 & (sizeof(abi_ulong) - 1)) {
7228 ret = -TARGET_EINVAL;
7229 break;
7230 }
7231 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7232
7233 mask = alloca(mask_size);
7234 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7235 goto efault;
7236 }
7237 memcpy(mask, p, arg2);
7238 unlock_user_struct(p, arg3, 0);
7239
7240 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7241 }
7242 break;
7243 case TARGET_NR_sched_setparam:
7244 {
7245 struct sched_param *target_schp;
7246 struct sched_param schp;
7247
7248 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7249 goto efault;
7250 schp.sched_priority = tswap32(target_schp->sched_priority);
7251 unlock_user_struct(target_schp, arg2, 0);
7252 ret = get_errno(sched_setparam(arg1, &schp));
7253 }
7254 break;
7255 case TARGET_NR_sched_getparam:
7256 {
7257 struct sched_param *target_schp;
7258 struct sched_param schp;
7259 ret = get_errno(sched_getparam(arg1, &schp));
7260 if (!is_error(ret)) {
7261 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7262 goto efault;
7263 target_schp->sched_priority = tswap32(schp.sched_priority);
7264 unlock_user_struct(target_schp, arg2, 1);
7265 }
7266 }
7267 break;
7268 case TARGET_NR_sched_setscheduler:
7269 {
7270 struct sched_param *target_schp;
7271 struct sched_param schp;
7272 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7273 goto efault;
7274 schp.sched_priority = tswap32(target_schp->sched_priority);
7275 unlock_user_struct(target_schp, arg3, 0);
7276 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7277 }
7278 break;
7279 case TARGET_NR_sched_getscheduler:
7280 ret = get_errno(sched_getscheduler(arg1));
7281 break;
7282 case TARGET_NR_sched_yield:
7283 ret = get_errno(sched_yield());
7284 break;
7285 case TARGET_NR_sched_get_priority_max:
7286 ret = get_errno(sched_get_priority_max(arg1));
7287 break;
7288 case TARGET_NR_sched_get_priority_min:
7289 ret = get_errno(sched_get_priority_min(arg1));
7290 break;
7291 case TARGET_NR_sched_rr_get_interval:
7292 {
7293 struct timespec ts;
7294 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7295 if (!is_error(ret)) {
7296 host_to_target_timespec(arg2, &ts);
7297 }
7298 }
7299 break;
7300 case TARGET_NR_nanosleep:
7301 {
7302 struct timespec req, rem;
7303 target_to_host_timespec(&req, arg1);
7304 ret = get_errno(nanosleep(&req, &rem));
7305 if (is_error(ret) && arg2) {
7306 host_to_target_timespec(arg2, &rem);
7307 }
7308 }
7309 break;
7310 #ifdef TARGET_NR_query_module
7311 case TARGET_NR_query_module:
7312 goto unimplemented;
7313 #endif
7314 #ifdef TARGET_NR_nfsservctl
7315 case TARGET_NR_nfsservctl:
7316 goto unimplemented;
7317 #endif
7318 case TARGET_NR_prctl:
7319 switch (arg1) {
7320 case PR_GET_PDEATHSIG:
7321 {
7322 int deathsig;
7323 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7324 if (!is_error(ret) && arg2
7325 && put_user_ual(deathsig, arg2)) {
7326 goto efault;
7327 }
7328 break;
7329 }
7330 #ifdef PR_GET_NAME
7331 case PR_GET_NAME:
7332 {
7333 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7334 if (!name) {
7335 goto efault;
7336 }
7337 ret = get_errno(prctl(arg1, (unsigned long)name,
7338 arg3, arg4, arg5));
7339 unlock_user(name, arg2, 16);
7340 break;
7341 }
7342 case PR_SET_NAME:
7343 {
7344 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7345 if (!name) {
7346 goto efault;
7347 }
7348 ret = get_errno(prctl(arg1, (unsigned long)name,
7349 arg3, arg4, arg5));
7350 unlock_user(name, arg2, 0);
7351 break;
7352 }
7353 #endif
7354 default:
7355 /* Most prctl options have no pointer arguments */
7356 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7357 break;
7358 }
7359 break;
7360 #ifdef TARGET_NR_arch_prctl
7361 case TARGET_NR_arch_prctl:
7362 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7363 ret = do_arch_prctl(cpu_env, arg1, arg2);
7364 break;
7365 #else
7366 goto unimplemented;
7367 #endif
7368 #endif
7369 #ifdef TARGET_NR_pread
7370 case TARGET_NR_pread:
7371 if (regpairs_aligned(cpu_env))
7372 arg4 = arg5;
7373 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7374 goto efault;
7375 ret = get_errno(pread(arg1, p, arg3, arg4));
7376 unlock_user(p, arg2, ret);
7377 break;
7378 case TARGET_NR_pwrite:
7379 if (regpairs_aligned(cpu_env))
7380 arg4 = arg5;
7381 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7382 goto efault;
7383 ret = get_errno(pwrite(arg1, p, arg3, arg4));
7384 unlock_user(p, arg2, 0);
7385 break;
7386 #endif
7387 #ifdef TARGET_NR_pread64
7388 case TARGET_NR_pread64:
7389 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7390 goto efault;
7391 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7392 unlock_user(p, arg2, ret);
7393 break;
7394 case TARGET_NR_pwrite64:
7395 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7396 goto efault;
7397 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7398 unlock_user(p, arg2, 0);
7399 break;
7400 #endif
7401 case TARGET_NR_getcwd:
7402 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7403 goto efault;
7404 ret = get_errno(sys_getcwd1(p, arg2));
7405 unlock_user(p, arg1, ret);
7406 break;
7407 case TARGET_NR_capget:
7408 goto unimplemented;
7409 case TARGET_NR_capset:
7410 goto unimplemented;
7411 case TARGET_NR_sigaltstack:
7412 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7413 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7414 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7415 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7416 break;
7417 #else
7418 goto unimplemented;
7419 #endif
7420 case TARGET_NR_sendfile:
7421 goto unimplemented;
7422 #ifdef TARGET_NR_getpmsg
7423 case TARGET_NR_getpmsg:
7424 goto unimplemented;
7425 #endif
7426 #ifdef TARGET_NR_putpmsg
7427 case TARGET_NR_putpmsg:
7428 goto unimplemented;
7429 #endif
7430 #ifdef TARGET_NR_vfork
7431 case TARGET_NR_vfork:
7432 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7433 0, 0, 0, 0));
7434 break;
7435 #endif
7436 #ifdef TARGET_NR_ugetrlimit
7437 case TARGET_NR_ugetrlimit:
7438 {
7439 struct rlimit rlim;
7440 int resource = target_to_host_resource(arg1);
7441 ret = get_errno(getrlimit(resource, &rlim));
7442 if (!is_error(ret)) {
7443 struct target_rlimit *target_rlim;
7444 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7445 goto efault;
7446 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7447 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7448 unlock_user_struct(target_rlim, arg2, 1);
7449 }
7450 break;
7451 }
7452 #endif
7453 #ifdef TARGET_NR_truncate64
7454 case TARGET_NR_truncate64:
7455 if (!(p = lock_user_string(arg1)))
7456 goto efault;
7457 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7458 unlock_user(p, arg1, 0);
7459 break;
7460 #endif
7461 #ifdef TARGET_NR_ftruncate64
7462 case TARGET_NR_ftruncate64:
7463 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7464 break;
7465 #endif
7466 #ifdef TARGET_NR_stat64
7467 case TARGET_NR_stat64:
7468 if (!(p = lock_user_string(arg1)))
7469 goto efault;
7470 ret = get_errno(stat(path(p), &st));
7471 unlock_user(p, arg1, 0);
7472 if (!is_error(ret))
7473 ret = host_to_target_stat64(cpu_env, arg2, &st);
7474 break;
7475 #endif
7476 #ifdef TARGET_NR_lstat64
7477 case TARGET_NR_lstat64:
7478 if (!(p = lock_user_string(arg1)))
7479 goto efault;
7480 ret = get_errno(lstat(path(p), &st));
7481 unlock_user(p, arg1, 0);
7482 if (!is_error(ret))
7483 ret = host_to_target_stat64(cpu_env, arg2, &st);
7484 break;
7485 #endif
7486 #ifdef TARGET_NR_fstat64
7487 case TARGET_NR_fstat64:
7488 ret = get_errno(fstat(arg1, &st));
7489 if (!is_error(ret))
7490 ret = host_to_target_stat64(cpu_env, arg2, &st);
7491 break;
7492 #endif
7493 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7494 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7495 #ifdef TARGET_NR_fstatat64
7496 case TARGET_NR_fstatat64:
7497 #endif
7498 #ifdef TARGET_NR_newfstatat
7499 case TARGET_NR_newfstatat:
7500 #endif
7501 if (!(p = lock_user_string(arg2)))
7502 goto efault;
7503 #ifdef __NR_fstatat64
7504 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7505 #else
7506 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7507 #endif
7508 if (!is_error(ret))
7509 ret = host_to_target_stat64(cpu_env, arg3, &st);
7510 break;
7511 #endif
7512 case TARGET_NR_lchown:
7513 if (!(p = lock_user_string(arg1)))
7514 goto efault;
7515 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7516 unlock_user(p, arg1, 0);
7517 break;
7518 #ifdef TARGET_NR_getuid
7519 case TARGET_NR_getuid:
7520 ret = get_errno(high2lowuid(getuid()));
7521 break;
7522 #endif
7523 #ifdef TARGET_NR_getgid
7524 case TARGET_NR_getgid:
7525 ret = get_errno(high2lowgid(getgid()));
7526 break;
7527 #endif
7528 #ifdef TARGET_NR_geteuid
7529 case TARGET_NR_geteuid:
7530 ret = get_errno(high2lowuid(geteuid()));
7531 break;
7532 #endif
7533 #ifdef TARGET_NR_getegid
7534 case TARGET_NR_getegid:
7535 ret = get_errno(high2lowgid(getegid()));
7536 break;
7537 #endif
7538 case TARGET_NR_setreuid:
7539 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7540 break;
7541 case TARGET_NR_setregid:
7542 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7543 break;
7544 case TARGET_NR_getgroups:
7545 {
7546 int gidsetsize = arg1;
7547 target_id *target_grouplist;
7548 gid_t *grouplist;
7549 int i;
7550
7551 grouplist = alloca(gidsetsize * sizeof(gid_t));
7552 ret = get_errno(getgroups(gidsetsize, grouplist));
7553 if (gidsetsize == 0)
7554 break;
7555 if (!is_error(ret)) {
7556 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7557 if (!target_grouplist)
7558 goto efault;
7559 for(i = 0;i < ret; i++)
7560 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7561 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7562 }
7563 }
7564 break;
7565 case TARGET_NR_setgroups:
7566 {
7567 int gidsetsize = arg1;
7568 target_id *target_grouplist;
7569 gid_t *grouplist;
7570 int i;
7571
7572 grouplist = alloca(gidsetsize * sizeof(gid_t));
7573 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7574 if (!target_grouplist) {
7575 ret = -TARGET_EFAULT;
7576 goto fail;
7577 }
7578 for(i = 0;i < gidsetsize; i++)
7579 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7580 unlock_user(target_grouplist, arg2, 0);
7581 ret = get_errno(setgroups(gidsetsize, grouplist));
7582 }
7583 break;
7584 case TARGET_NR_fchown:
7585 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7586 break;
7587 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7588 case TARGET_NR_fchownat:
7589 if (!(p = lock_user_string(arg2)))
7590 goto efault;
7591 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7592 unlock_user(p, arg2, 0);
7593 break;
7594 #endif
7595 #ifdef TARGET_NR_setresuid
7596 case TARGET_NR_setresuid:
7597 ret = get_errno(setresuid(low2highuid(arg1),
7598 low2highuid(arg2),
7599 low2highuid(arg3)));
7600 break;
7601 #endif
7602 #ifdef TARGET_NR_getresuid
7603 case TARGET_NR_getresuid:
7604 {
7605 uid_t ruid, euid, suid;
7606 ret = get_errno(getresuid(&ruid, &euid, &suid));
7607 if (!is_error(ret)) {
7608 if (put_user_u16(high2lowuid(ruid), arg1)
7609 || put_user_u16(high2lowuid(euid), arg2)
7610 || put_user_u16(high2lowuid(suid), arg3))
7611 goto efault;
7612 }
7613 }
7614 break;
7615 #endif
7616 #ifdef TARGET_NR_getresgid
7617 case TARGET_NR_setresgid:
7618 ret = get_errno(setresgid(low2highgid(arg1),
7619 low2highgid(arg2),
7620 low2highgid(arg3)));
7621 break;
7622 #endif
7623 #ifdef TARGET_NR_getresgid
7624 case TARGET_NR_getresgid:
7625 {
7626 gid_t rgid, egid, sgid;
7627 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7628 if (!is_error(ret)) {
7629 if (put_user_u16(high2lowgid(rgid), arg1)
7630 || put_user_u16(high2lowgid(egid), arg2)
7631 || put_user_u16(high2lowgid(sgid), arg3))
7632 goto efault;
7633 }
7634 }
7635 break;
7636 #endif
7637 case TARGET_NR_chown:
7638 if (!(p = lock_user_string(arg1)))
7639 goto efault;
7640 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7641 unlock_user(p, arg1, 0);
7642 break;
7643 case TARGET_NR_setuid:
7644 ret = get_errno(setuid(low2highuid(arg1)));
7645 break;
7646 case TARGET_NR_setgid:
7647 ret = get_errno(setgid(low2highgid(arg1)));
7648 break;
7649 case TARGET_NR_setfsuid:
7650 ret = get_errno(setfsuid(arg1));
7651 break;
7652 case TARGET_NR_setfsgid:
7653 ret = get_errno(setfsgid(arg1));
7654 break;
7655
7656 #ifdef TARGET_NR_lchown32
7657 case TARGET_NR_lchown32:
7658 if (!(p = lock_user_string(arg1)))
7659 goto efault;
7660 ret = get_errno(lchown(p, arg2, arg3));
7661 unlock_user(p, arg1, 0);
7662 break;
7663 #endif
7664 #ifdef TARGET_NR_getuid32
7665 case TARGET_NR_getuid32:
7666 ret = get_errno(getuid());
7667 break;
7668 #endif
7669
7670 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7671 /* Alpha specific */
7672 case TARGET_NR_getxuid:
7673 {
7674 uid_t euid;
7675 euid = geteuid();
7676 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
7677 }
7678 ret = get_errno(getuid());
7679 break;
7680 #endif
7681 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7682 /* Alpha specific */
7683 case TARGET_NR_getxgid:
7684 {
7685 gid_t egid;
7686 egid = getegid();
7687 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
7688 }
7689 ret = get_errno(getgid());
7690 break;
7691 #endif
7692 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7693 /* Alpha specific */
7694 case TARGET_NR_osf_getsysinfo:
7695 ret = -TARGET_EOPNOTSUPP;
7696 switch (arg1) {
7697 case TARGET_GSI_IEEE_FP_CONTROL:
7698 {
7699 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7700
7701 /* Copied from linux ieee_fpcr_to_swcr. */
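/* i.e. fold the hardware FPCR bits into the software completion
 * control word (SWCR) layout that GSI_IEEE_FP_CONTROL reports:
 * sticky status flags, the denormal-mapping bits, and the trap
 * enables (inverted, since the FPCR stores them as trap-disable
 * bits). */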
7702 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7703 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7704 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7705 | SWCR_TRAP_ENABLE_DZE
7706 | SWCR_TRAP_ENABLE_OVF);
7707 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7708 | SWCR_TRAP_ENABLE_INE);
7709 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7710 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
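                /* Note on the complemented shifts above: the hardware FPCR
                 * stores trap *disable* bits while the software SWCR exposes
                 * trap *enable* bits, so those fields are inverted (~fpcr)
                 * on the way out, while the status and mapping bits are
                 * copied through directly. */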
7711
7712 if (put_user_u64 (swcr, arg2))
7713 goto efault;
7714 ret = 0;
7715 }
7716 break;
7717
7718 /* case GSI_IEEE_STATE_AT_SIGNAL:
7719 -- Not implemented in linux kernel.
7720 case GSI_UACPROC:
7721 -- Retrieves current unaligned access state; not much used.
7722 case GSI_PROC_TYPE:
7723 -- Retrieves implver information; surely not used.
7724 case GSI_GET_HWRPB:
7725 -- Grabs a copy of the HWRPB; surely not used.
7726 */
7727 }
7728 break;
7729 #endif
7730 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7731 /* Alpha specific */
7732 case TARGET_NR_osf_setsysinfo:
7733 ret = -TARGET_EOPNOTSUPP;
7734 switch (arg1) {
7735 case TARGET_SSI_IEEE_FP_CONTROL:
7736 {
7737 uint64_t swcr, fpcr, orig_fpcr;
7738
7739 if (get_user_u64 (swcr, arg2)) {
7740 goto efault;
7741 }
7742 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7743 fpcr = orig_fpcr & FPCR_DYN_MASK;
7744
7745 /* Copied from linux ieee_swcr_to_fpcr. */
7746 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7747 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7748 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7749 | SWCR_TRAP_ENABLE_DZE
7750 | SWCR_TRAP_ENABLE_OVF)) << 48;
7751 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7752 | SWCR_TRAP_ENABLE_INE)) << 57;
7753 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7754 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7755
7756 cpu_alpha_store_fpcr(cpu_env, fpcr);
7757 ret = 0;
7758 }
7759 break;
7760
7761 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7762 {
7763 uint64_t exc, fpcr, orig_fpcr;
7764 int si_code;
7765
7766 if (get_user_u64(exc, arg2)) {
7767 goto efault;
7768 }
7769
7770 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7771
7772 /* We only add to the exception status here. */
7773 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7774
7775 cpu_alpha_store_fpcr(cpu_env, fpcr);
7776 ret = 0;
7777
7778 /* Old exceptions are not signaled. */
7779 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7780
7781 /* If any exceptions set by this call are unmasked,
7782 send a signal. */
7783 si_code = 0;
7784 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7785 si_code = TARGET_FPE_FLTRES;
7786 }
7787 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7788 si_code = TARGET_FPE_FLTUND;
7789 }
7790 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7791 si_code = TARGET_FPE_FLTOVF;
7792 }
7793 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7794 si_code = TARGET_FPE_FLTDIV;
7795 }
7796 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7797 si_code = TARGET_FPE_FLTINV;
7798 }
7799 if (si_code != 0) {
7800 target_siginfo_t info;
7801 info.si_signo = SIGFPE;
7802 info.si_errno = 0;
7803 info.si_code = si_code;
7804 info._sifields._sigfault._addr
7805 = ((CPUArchState *)cpu_env)->pc;
7806 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7807 }
7808 }
7809 break;
7810
7811 /* case SSI_NVPAIRS:
7812 -- Used with SSIN_UACPROC to enable unaligned accesses.
7813 case SSI_IEEE_STATE_AT_SIGNAL:
7814 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7815 -- Not implemented in linux kernel
7816 */
7817 }
7818 break;
7819 #endif
7820 #ifdef TARGET_NR_osf_sigprocmask
7821 /* Alpha specific. */
7822 case TARGET_NR_osf_sigprocmask:
7823 {
7824 abi_ulong mask;
7825 int how;
7826 sigset_t set, oldset;
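            /* The OSF/1-style sigprocmask differs from the POSIX call in
             * that the previous mask is handed back in the syscall return
             * value rather than through a user pointer, hence the
             * "ret = mask" at the end of this block. */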
7827
7828 switch(arg1) {
7829 case TARGET_SIG_BLOCK:
7830 how = SIG_BLOCK;
7831 break;
7832 case TARGET_SIG_UNBLOCK:
7833 how = SIG_UNBLOCK;
7834 break;
7835 case TARGET_SIG_SETMASK:
7836 how = SIG_SETMASK;
7837 break;
7838 default:
7839 ret = -TARGET_EINVAL;
7840 goto fail;
7841 }
7842 mask = arg2;
7843 target_to_host_old_sigset(&set, &mask);
7844 sigprocmask(how, &set, &oldset);
7845 host_to_target_old_sigset(&mask, &oldset);
7846 ret = mask;
7847 }
7848 break;
7849 #endif
7850
7851 #ifdef TARGET_NR_getgid32
7852 case TARGET_NR_getgid32:
7853 ret = get_errno(getgid());
7854 break;
7855 #endif
7856 #ifdef TARGET_NR_geteuid32
7857 case TARGET_NR_geteuid32:
7858 ret = get_errno(geteuid());
7859 break;
7860 #endif
7861 #ifdef TARGET_NR_getegid32
7862 case TARGET_NR_getegid32:
7863 ret = get_errno(getegid());
7864 break;
7865 #endif
7866 #ifdef TARGET_NR_setreuid32
7867 case TARGET_NR_setreuid32:
7868 ret = get_errno(setreuid(arg1, arg2));
7869 break;
7870 #endif
7871 #ifdef TARGET_NR_setregid32
7872 case TARGET_NR_setregid32:
7873 ret = get_errno(setregid(arg1, arg2));
7874 break;
7875 #endif
7876 #ifdef TARGET_NR_getgroups32
7877 case TARGET_NR_getgroups32:
7878 {
7879 int gidsetsize = arg1;
7880 uint32_t *target_grouplist;
7881 gid_t *grouplist;
7882 int i;
7883
7884 grouplist = alloca(gidsetsize * sizeof(gid_t));
7885 ret = get_errno(getgroups(gidsetsize, grouplist));
7886 if (gidsetsize == 0)
7887 break;
7888 if (!is_error(ret)) {
7889 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7890 if (!target_grouplist) {
7891 ret = -TARGET_EFAULT;
7892 goto fail;
7893 }
7894 for(i = 0;i < ret; i++)
7895 target_grouplist[i] = tswap32(grouplist[i]);
7896 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7897 }
7898 }
7899 break;
7900 #endif
7901 #ifdef TARGET_NR_setgroups32
7902 case TARGET_NR_setgroups32:
7903 {
7904 int gidsetsize = arg1;
7905 uint32_t *target_grouplist;
7906 gid_t *grouplist;
7907 int i;
7908
7909 grouplist = alloca(gidsetsize * sizeof(gid_t));
7910 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7911 if (!target_grouplist) {
7912 ret = -TARGET_EFAULT;
7913 goto fail;
7914 }
7915 for(i = 0;i < gidsetsize; i++)
7916 grouplist[i] = tswap32(target_grouplist[i]);
7917 unlock_user(target_grouplist, arg2, 0);
7918 ret = get_errno(setgroups(gidsetsize, grouplist));
7919 }
7920 break;
7921 #endif
7922 #ifdef TARGET_NR_fchown32
7923 case TARGET_NR_fchown32:
7924 ret = get_errno(fchown(arg1, arg2, arg3));
7925 break;
7926 #endif
7927 #ifdef TARGET_NR_setresuid32
7928 case TARGET_NR_setresuid32:
7929 ret = get_errno(setresuid(arg1, arg2, arg3));
7930 break;
7931 #endif
7932 #ifdef TARGET_NR_getresuid32
7933 case TARGET_NR_getresuid32:
7934 {
7935 uid_t ruid, euid, suid;
7936 ret = get_errno(getresuid(&ruid, &euid, &suid));
7937 if (!is_error(ret)) {
7938 if (put_user_u32(ruid, arg1)
7939 || put_user_u32(euid, arg2)
7940 || put_user_u32(suid, arg3))
7941 goto efault;
7942 }
7943 }
7944 break;
7945 #endif
7946 #ifdef TARGET_NR_setresgid32
7947 case TARGET_NR_setresgid32:
7948 ret = get_errno(setresgid(arg1, arg2, arg3));
7949 break;
7950 #endif
7951 #ifdef TARGET_NR_getresgid32
7952 case TARGET_NR_getresgid32:
7953 {
7954 gid_t rgid, egid, sgid;
7955 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7956 if (!is_error(ret)) {
7957 if (put_user_u32(rgid, arg1)
7958 || put_user_u32(egid, arg2)
7959 || put_user_u32(sgid, arg3))
7960 goto efault;
7961 }
7962 }
7963 break;
7964 #endif
7965 #ifdef TARGET_NR_chown32
7966 case TARGET_NR_chown32:
7967 if (!(p = lock_user_string(arg1)))
7968 goto efault;
7969 ret = get_errno(chown(p, arg2, arg3));
7970 unlock_user(p, arg1, 0);
7971 break;
7972 #endif
7973 #ifdef TARGET_NR_setuid32
7974 case TARGET_NR_setuid32:
7975 ret = get_errno(setuid(arg1));
7976 break;
7977 #endif
7978 #ifdef TARGET_NR_setgid32
7979 case TARGET_NR_setgid32:
7980 ret = get_errno(setgid(arg1));
7981 break;
7982 #endif
7983 #ifdef TARGET_NR_setfsuid32
7984 case TARGET_NR_setfsuid32:
7985 ret = get_errno(setfsuid(arg1));
7986 break;
7987 #endif
7988 #ifdef TARGET_NR_setfsgid32
7989 case TARGET_NR_setfsgid32:
7990 ret = get_errno(setfsgid(arg1));
7991 break;
7992 #endif
7993
7994 case TARGET_NR_pivot_root:
7995 goto unimplemented;
7996 #ifdef TARGET_NR_mincore
7997 case TARGET_NR_mincore:
7998 {
7999 void *a;
8000 ret = -TARGET_EFAULT;
8001 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8002 goto efault;
8003 if (!(p = lock_user_string(arg3)))
8004 goto mincore_fail;
8005 ret = get_errno(mincore(a, arg2, p));
8006 unlock_user(p, arg3, ret);
8007 mincore_fail:
8008 unlock_user(a, arg1, 0);
8009 }
8010 break;
8011 #endif
8012 #ifdef TARGET_NR_arm_fadvise64_64
8013 case TARGET_NR_arm_fadvise64_64:
8014 {
8015 /*
8016 * arm_fadvise64_64 looks like fadvise64_64 but
8017 * with different argument order
8018 */
8019 abi_long temp;
8020 temp = arg3;
8021 arg3 = arg4;
8022 arg4 = temp;
8023 }
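        /* No break here: after swapping the arguments this deliberately
         * falls through into the common fadvise64 handling below. */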
8024 #endif
8025 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8026 #ifdef TARGET_NR_fadvise64_64
8027 case TARGET_NR_fadvise64_64:
8028 #endif
8029 #ifdef TARGET_NR_fadvise64
8030 case TARGET_NR_fadvise64:
8031 #endif
8032 #ifdef TARGET_S390X
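        /* The s390x target appears to define POSIX_FADV_DONTNEED/NOREUSE
         * as 6/7 rather than the usual 4/5 (an assumption based on this
         * remap table), so the target values are translated before being
         * handed to the host; the target's 4 and 5 are mapped to
         * deliberately invalid host values. */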
8033 switch (arg4) {
8034 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8035 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8036 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8037 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8038 default: break;
8039 }
8040 #endif
8041 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
8042 break;
8043 #endif
8044 #ifdef TARGET_NR_madvise
8045 case TARGET_NR_madvise:
8046 /* A straight passthrough may not be safe because qemu sometimes
8047 turns private file-backed mappings into anonymous mappings.
8048 This will break MADV_DONTNEED.
8049 This is a hint, so ignoring and returning success is ok. */
8050 ret = get_errno(0);
8051 break;
8052 #endif
8053 #if TARGET_ABI_BITS == 32
8054 case TARGET_NR_fcntl64:
8055 {
8056 int cmd;
8057 struct flock64 fl;
8058 struct target_flock64 *target_fl;
8059 #ifdef TARGET_ARM
8060 struct target_eabi_flock64 *target_efl;
8061 #endif
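            /* ARM EABI aligns 64-bit struct members to 8 bytes, so the EABI
             * flock64 layout differs from the old-ABI one; that is
             * presumably why a separate target_eabi_flock64 conversion path
             * is needed below. */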
8062
8063 cmd = target_to_host_fcntl_cmd(arg2);
8064 if (cmd == -TARGET_EINVAL) {
8065 ret = cmd;
8066 break;
8067 }
8068
8069 switch(arg2) {
8070 case TARGET_F_GETLK64:
8071 #ifdef TARGET_ARM
8072 if (((CPUARMState *)cpu_env)->eabi) {
8073 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8074 goto efault;
8075 fl.l_type = tswap16(target_efl->l_type);
8076 fl.l_whence = tswap16(target_efl->l_whence);
8077 fl.l_start = tswap64(target_efl->l_start);
8078 fl.l_len = tswap64(target_efl->l_len);
8079 fl.l_pid = tswap32(target_efl->l_pid);
8080 unlock_user_struct(target_efl, arg3, 0);
8081 } else
8082 #endif
8083 {
8084 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8085 goto efault;
8086 fl.l_type = tswap16(target_fl->l_type);
8087 fl.l_whence = tswap16(target_fl->l_whence);
8088 fl.l_start = tswap64(target_fl->l_start);
8089 fl.l_len = tswap64(target_fl->l_len);
8090 fl.l_pid = tswap32(target_fl->l_pid);
8091 unlock_user_struct(target_fl, arg3, 0);
8092 }
8093 ret = get_errno(fcntl(arg1, cmd, &fl));
8094 if (ret == 0) {
8095 #ifdef TARGET_ARM
8096 if (((CPUARMState *)cpu_env)->eabi) {
8097 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8098 goto efault;
8099 target_efl->l_type = tswap16(fl.l_type);
8100 target_efl->l_whence = tswap16(fl.l_whence);
8101 target_efl->l_start = tswap64(fl.l_start);
8102 target_efl->l_len = tswap64(fl.l_len);
8103 target_efl->l_pid = tswap32(fl.l_pid);
8104 unlock_user_struct(target_efl, arg3, 1);
8105 } else
8106 #endif
8107 {
8108 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8109 goto efault;
8110 target_fl->l_type = tswap16(fl.l_type);
8111 target_fl->l_whence = tswap16(fl.l_whence);
8112 target_fl->l_start = tswap64(fl.l_start);
8113 target_fl->l_len = tswap64(fl.l_len);
8114 target_fl->l_pid = tswap32(fl.l_pid);
8115 unlock_user_struct(target_fl, arg3, 1);
8116 }
8117 }
8118 break;
8119
8120 case TARGET_F_SETLK64:
8121 case TARGET_F_SETLKW64:
8122 #ifdef TARGET_ARM
8123 if (((CPUARMState *)cpu_env)->eabi) {
8124 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8125 goto efault;
8126 fl.l_type = tswap16(target_efl->l_type);
8127 fl.l_whence = tswap16(target_efl->l_whence);
8128 fl.l_start = tswap64(target_efl->l_start);
8129 fl.l_len = tswap64(target_efl->l_len);
8130 fl.l_pid = tswap32(target_efl->l_pid);
8131 unlock_user_struct(target_efl, arg3, 0);
8132 } else
8133 #endif
8134 {
8135 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8136 goto efault;
8137 fl.l_type = tswap16(target_fl->l_type);
8138 fl.l_whence = tswap16(target_fl->l_whence);
8139 fl.l_start = tswap64(target_fl->l_start);
8140 fl.l_len = tswap64(target_fl->l_len);
8141 fl.l_pid = tswap32(target_fl->l_pid);
8142 unlock_user_struct(target_fl, arg3, 0);
8143 }
8144 ret = get_errno(fcntl(arg1, cmd, &fl));
8145 break;
8146 default:
8147 ret = do_fcntl(arg1, arg2, arg3);
8148 break;
8149 }
8150 break;
8151 }
8152 #endif
8153 #ifdef TARGET_NR_cacheflush
8154 case TARGET_NR_cacheflush:
8155 /* self-modifying code is handled automatically, so nothing needed */
8156 ret = 0;
8157 break;
8158 #endif
8159 #ifdef TARGET_NR_security
8160 case TARGET_NR_security:
8161 goto unimplemented;
8162 #endif
8163 #ifdef TARGET_NR_getpagesize
8164 case TARGET_NR_getpagesize:
8165 ret = TARGET_PAGE_SIZE;
8166 break;
8167 #endif
8168 case TARGET_NR_gettid:
8169 ret = get_errno(gettid());
8170 break;
8171 #ifdef TARGET_NR_readahead
8172 case TARGET_NR_readahead:
8173 #if TARGET_ABI_BITS == 32
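    /* On 32-bit ABIs the 64-bit offset arrives split across a register
     * pair; ABIs that require such pairs to start on an even-numbered
     * register insert a padding argument, which regpairs_aligned()
     * detects so the remaining arguments can be shifted down by one. */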
8174 if (regpairs_aligned(cpu_env)) {
8175 arg2 = arg3;
8176 arg3 = arg4;
8177 arg4 = arg5;
8178 }
8179 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8180 #else
8181 ret = get_errno(readahead(arg1, arg2, arg3));
8182 #endif
8183 break;
8184 #endif
8185 #ifdef CONFIG_ATTR
8186 #ifdef TARGET_NR_setxattr
8187 case TARGET_NR_listxattr:
8188 case TARGET_NR_llistxattr:
8189 {
8190 void *p, *b = 0;
8191 if (arg2) {
8192 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8193 if (!b) {
8194 ret = -TARGET_EFAULT;
8195 break;
8196 }
8197 }
8198 p = lock_user_string(arg1);
8199 if (p) {
8200 if (num == TARGET_NR_listxattr) {
8201 ret = get_errno(listxattr(p, b, arg3));
8202 } else {
8203 ret = get_errno(llistxattr(p, b, arg3));
8204 }
8205 } else {
8206 ret = -TARGET_EFAULT;
8207 }
8208 unlock_user(p, arg1, 0);
8209 unlock_user(b, arg2, arg3);
8210 break;
8211 }
8212 case TARGET_NR_flistxattr:
8213 {
8214 void *b = 0;
8215 if (arg2) {
8216 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8217 if (!b) {
8218 ret = -TARGET_EFAULT;
8219 break;
8220 }
8221 }
8222 ret = get_errno(flistxattr(arg1, b, arg3));
8223 unlock_user(b, arg2, arg3);
8224 break;
8225 }
8226 case TARGET_NR_setxattr:
8227 case TARGET_NR_lsetxattr:
8228 {
8229 void *p, *n, *v = 0;
8230 if (arg3) {
8231 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8232 if (!v) {
8233 ret = -TARGET_EFAULT;
8234 break;
8235 }
8236 }
8237 p = lock_user_string(arg1);
8238 n = lock_user_string(arg2);
8239 if (p && n) {
8240 if (num == TARGET_NR_setxattr) {
8241 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8242 } else {
8243 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8244 }
8245 } else {
8246 ret = -TARGET_EFAULT;
8247 }
8248 unlock_user(p, arg1, 0);
8249 unlock_user(n, arg2, 0);
8250 unlock_user(v, arg3, 0);
8251 }
8252 break;
8253 case TARGET_NR_fsetxattr:
8254 {
8255 void *n, *v = 0;
8256 if (arg3) {
8257 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8258 if (!v) {
8259 ret = -TARGET_EFAULT;
8260 break;
8261 }
8262 }
8263 n = lock_user_string(arg2);
8264 if (n) {
8265 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8266 } else {
8267 ret = -TARGET_EFAULT;
8268 }
8269 unlock_user(n, arg2, 0);
8270 unlock_user(v, arg3, 0);
8271 }
8272 break;
8273 case TARGET_NR_getxattr:
8274 case TARGET_NR_lgetxattr:
8275 {
8276 void *p, *n, *v = 0;
8277 if (arg3) {
8278 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8279 if (!v) {
8280 ret = -TARGET_EFAULT;
8281 break;
8282 }
8283 }
8284 p = lock_user_string(arg1);
8285 n = lock_user_string(arg2);
8286 if (p && n) {
8287 if (num == TARGET_NR_getxattr) {
8288 ret = get_errno(getxattr(p, n, v, arg4));
8289 } else {
8290 ret = get_errno(lgetxattr(p, n, v, arg4));
8291 }
8292 } else {
8293 ret = -TARGET_EFAULT;
8294 }
8295 unlock_user(p, arg1, 0);
8296 unlock_user(n, arg2, 0);
8297 unlock_user(v, arg3, arg4);
8298 }
8299 break;
8300 case TARGET_NR_fgetxattr:
8301 {
8302 void *n, *v = 0;
8303 if (arg3) {
8304 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8305 if (!v) {
8306 ret = -TARGET_EFAULT;
8307 break;
8308 }
8309 }
8310 n = lock_user_string(arg2);
8311 if (n) {
8312 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8313 } else {
8314 ret = -TARGET_EFAULT;
8315 }
8316 unlock_user(n, arg2, 0);
8317 unlock_user(v, arg3, arg4);
8318 }
8319 break;
8320 case TARGET_NR_removexattr:
8321 case TARGET_NR_lremovexattr:
8322 {
8323 void *p, *n;
8324 p = lock_user_string(arg1);
8325 n = lock_user_string(arg2);
8326 if (p && n) {
8327 if (num == TARGET_NR_removexattr) {
8328 ret = get_errno(removexattr(p, n));
8329 } else {
8330 ret = get_errno(lremovexattr(p, n));
8331 }
8332 } else {
8333 ret = -TARGET_EFAULT;
8334 }
8335 unlock_user(p, arg1, 0);
8336 unlock_user(n, arg2, 0);
8337 }
8338 break;
8339 case TARGET_NR_fremovexattr:
8340 {
8341 void *n;
8342 n = lock_user_string(arg2);
8343 if (n) {
8344 ret = get_errno(fremovexattr(arg1, n));
8345 } else {
8346 ret = -TARGET_EFAULT;
8347 }
8348 unlock_user(n, arg2, 0);
8349 }
8350 break;
8351 #endif
8352 #endif /* CONFIG_ATTR */
8353 #ifdef TARGET_NR_set_thread_area
8354 case TARGET_NR_set_thread_area:
8355 #if defined(TARGET_MIPS)
8356 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8357 ret = 0;
8358 break;
8359 #elif defined(TARGET_CRIS)
8360 if (arg1 & 0xff)
8361 ret = -TARGET_EINVAL;
8362 else {
8363 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8364 ret = 0;
8365 }
8366 break;
8367 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8368 ret = do_set_thread_area(cpu_env, arg1);
8369 break;
8370 #else
8371 goto unimplemented_nowarn;
8372 #endif
8373 #endif
8374 #ifdef TARGET_NR_get_thread_area
8375 case TARGET_NR_get_thread_area:
8376 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8377 ret = do_get_thread_area(cpu_env, arg1);
8378 #else
8379 goto unimplemented_nowarn;
8380 #endif
8381 #endif
8382 #ifdef TARGET_NR_getdomainname
8383 case TARGET_NR_getdomainname:
8384 goto unimplemented_nowarn;
8385 #endif
8386
8387 #ifdef TARGET_NR_clock_gettime
8388 case TARGET_NR_clock_gettime:
8389 {
8390 struct timespec ts;
8391 ret = get_errno(clock_gettime(arg1, &ts));
8392 if (!is_error(ret)) {
8393 host_to_target_timespec(arg2, &ts);
8394 }
8395 break;
8396 }
8397 #endif
8398 #ifdef TARGET_NR_clock_getres
8399 case TARGET_NR_clock_getres:
8400 {
8401 struct timespec ts;
8402 ret = get_errno(clock_getres(arg1, &ts));
8403 if (!is_error(ret)) {
8404 host_to_target_timespec(arg2, &ts);
8405 }
8406 break;
8407 }
8408 #endif
8409 #ifdef TARGET_NR_clock_nanosleep
8410 case TARGET_NR_clock_nanosleep:
8411 {
8412 struct timespec ts;
8413 target_to_host_timespec(&ts, arg3);
8414 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8415 if (arg4)
8416 host_to_target_timespec(arg4, &ts);
8417 break;
8418 }
8419 #endif
8420
8421 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8422 case TARGET_NR_set_tid_address:
8423 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8424 break;
8425 #endif
8426
8427 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8428 case TARGET_NR_tkill:
8429 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8430 break;
8431 #endif
8432
8433 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8434 case TARGET_NR_tgkill:
8435 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8436 target_to_host_signal(arg3)));
8437 break;
8438 #endif
8439
8440 #ifdef TARGET_NR_set_robust_list
8441 case TARGET_NR_set_robust_list:
8442 goto unimplemented_nowarn;
8443 #endif
8444
8445 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8446 case TARGET_NR_utimensat:
8447 {
8448 struct timespec *tsp, ts[2];
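            /* utimensat takes an array of two timespecs: ts[0] is the
             * access time and ts[1] the modification time, so both entries
             * are converted from the target layout below. */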
8449 if (!arg3) {
8450 tsp = NULL;
8451 } else {
8452 target_to_host_timespec(ts, arg3);
8453 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8454 tsp = ts;
8455 }
8456 if (!arg2)
8457 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8458 else {
8459 if (!(p = lock_user_string(arg2))) {
8460 ret = -TARGET_EFAULT;
8461 goto fail;
8462 }
8463 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8464 unlock_user(p, arg2, 0);
8465 }
8466 }
8467 break;
8468 #endif
8469 #if defined(CONFIG_USE_NPTL)
8470 case TARGET_NR_futex:
8471 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8472 break;
8473 #endif
8474 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8475 case TARGET_NR_inotify_init:
8476 ret = get_errno(sys_inotify_init());
8477 break;
8478 #endif
8479 #ifdef CONFIG_INOTIFY1
8480 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8481 case TARGET_NR_inotify_init1:
8482 ret = get_errno(sys_inotify_init1(arg1));
8483 break;
8484 #endif
8485 #endif
8486 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8487 case TARGET_NR_inotify_add_watch:
8488 p = lock_user_string(arg2);
8489 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8490 unlock_user(p, arg2, 0);
8491 break;
8492 #endif
8493 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8494 case TARGET_NR_inotify_rm_watch:
8495 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8496 break;
8497 #endif
8498
8499 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8500 case TARGET_NR_mq_open:
8501 {
8502 struct mq_attr posix_mq_attr;
8503
8504 p = lock_user_string(arg1 - 1);
8505 if (arg4 != 0)
8506 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8507 ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL));
8508 unlock_user (p, arg1, 0);
8509 }
8510 break;
8511
8512 case TARGET_NR_mq_unlink:
8513 p = lock_user_string(arg1 - 1);
8514 ret = get_errno(mq_unlink(p));
8515 unlock_user (p, arg1, 0);
8516 break;
8517
8518 case TARGET_NR_mq_timedsend:
8519 {
8520 struct timespec ts;
8521
8522 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8523 if (arg5 != 0) {
8524 target_to_host_timespec(&ts, arg5);
8525 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8526 host_to_target_timespec(arg5, &ts);
8527 }
8528 else
8529 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8530 unlock_user (p, arg2, arg3);
8531 }
8532 break;
8533
8534 case TARGET_NR_mq_timedreceive:
8535 {
8536 struct timespec ts;
8537 unsigned int prio;
8538
8539 p = lock_user (VERIFY_WRITE, arg2, arg3, 1);
8540 if (arg5 != 0) {
8541 target_to_host_timespec(&ts, arg5);
8542 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8543 host_to_target_timespec(arg5, &ts);
8544 }
8545 else
8546 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8547 unlock_user (p, arg2, arg3);
8548 if (arg4 != 0)
8549 put_user_u32(prio, arg4);
8550 }
8551 break;
8552
8553 /* Not implemented for now... */
8554 /* case TARGET_NR_mq_notify: */
8555 /* break; */
8556
8557 case TARGET_NR_mq_getsetattr:
8558 {
8559 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8560 ret = 0;
8561 if (arg3 != 0) {
8562 ret = mq_getattr(arg1, &posix_mq_attr_out);
8563 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8564 }
8565 if (arg2 != 0) {
8566 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8567 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8568 }
8569
8570 }
8571 break;
8572 #endif
8573
8574 #ifdef CONFIG_SPLICE
8575 #ifdef TARGET_NR_tee
8576 case TARGET_NR_tee:
8577 {
8578 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8579 }
8580 break;
8581 #endif
8582 #ifdef TARGET_NR_splice
8583 case TARGET_NR_splice:
8584 {
8585 loff_t loff_in, loff_out;
8586 loff_t *ploff_in = NULL, *ploff_out = NULL;
8587 if(arg2) {
8588 get_user_u64(loff_in, arg2);
8589 ploff_in = &loff_in;
8590 }
8591 if(arg4) {
8592 get_user_u64(loff_out, arg4);
8593 ploff_out = &loff_out;
8594 }
8595 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8596 }
8597 break;
8598 #endif
8599 #ifdef TARGET_NR_vmsplice
8600 case TARGET_NR_vmsplice:
8601 {
8602 int count = arg3;
8603 struct iovec *vec;
8604
8605 vec = alloca(count * sizeof(struct iovec));
8606 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
8607 goto efault;
8608 ret = get_errno(vmsplice(arg1, vec, count, arg4));
8609 unlock_iovec(vec, arg2, count, 0);
8610 }
8611 break;
8612 #endif
8613 #endif /* CONFIG_SPLICE */
8614 #ifdef CONFIG_EVENTFD
8615 #if defined(TARGET_NR_eventfd)
8616 case TARGET_NR_eventfd:
8617 ret = get_errno(eventfd(arg1, 0));
8618 break;
8619 #endif
8620 #if defined(TARGET_NR_eventfd2)
8621 case TARGET_NR_eventfd2:
8622 ret = get_errno(eventfd(arg1, arg2));
8623 break;
8624 #endif
8625 #endif /* CONFIG_EVENTFD */
8626 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8627 case TARGET_NR_fallocate:
8628 #if TARGET_ABI_BITS == 32
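    /* On 32-bit ABIs the 64-bit offset and length are each passed as a
     * pair of 32-bit arguments; target_offset64() glues the halves back
     * together before the host fallocate() is called. */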
8629 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8630 target_offset64(arg5, arg6)));
8631 #else
8632 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8633 #endif
8634 break;
8635 #endif
8636 #if defined(CONFIG_SYNC_FILE_RANGE)
8637 #if defined(TARGET_NR_sync_file_range)
8638 case TARGET_NR_sync_file_range:
8639 #if TARGET_ABI_BITS == 32
8640 #if defined(TARGET_MIPS)
8641 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8642 target_offset64(arg5, arg6), arg7));
8643 #else
8644 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8645 target_offset64(arg4, arg5), arg6));
8646 #endif /* !TARGET_MIPS */
8647 #else
8648 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8649 #endif
8650 break;
8651 #endif
8652 #if defined(TARGET_NR_sync_file_range2)
8653 case TARGET_NR_sync_file_range2:
8654 /* This is like sync_file_range but the arguments are reordered */
8655 #if TARGET_ABI_BITS == 32
8656 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8657 target_offset64(arg5, arg6), arg2));
8658 #else
8659 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8660 #endif
8661 break;
8662 #endif
8663 #endif
8664 #if defined(CONFIG_EPOLL)
8665 #if defined(TARGET_NR_epoll_create)
8666 case TARGET_NR_epoll_create:
8667 ret = get_errno(epoll_create(arg1));
8668 break;
8669 #endif
8670 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8671 case TARGET_NR_epoll_create1:
8672 ret = get_errno(epoll_create1(arg1));
8673 break;
8674 #endif
8675 #if defined(TARGET_NR_epoll_ctl)
8676 case TARGET_NR_epoll_ctl:
8677 {
8678 struct epoll_event ep;
8679 struct epoll_event *epp = 0;
8680 if (arg4) {
8681 struct target_epoll_event *target_ep;
8682 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8683 goto efault;
8684 }
8685 ep.events = tswap32(target_ep->events);
8686 /* The epoll_data_t union is just opaque data to the kernel,
8687 * so we transfer all 64 bits across and need not worry what
8688 * actual data type it is.
8689 */
8690 ep.data.u64 = tswap64(target_ep->data.u64);
8691 unlock_user_struct(target_ep, arg4, 0);
8692 epp = &ep;
8693 }
8694 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8695 break;
8696 }
8697 #endif
8698
8699 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8700 #define IMPLEMENT_EPOLL_PWAIT
8701 #endif
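/* epoll_wait and epoll_pwait share the single body below; the inner switch
 * on "num" picks the host call, and IMPLEMENT_EPOLL_PWAIT is only defined
 * when both the target syscall number and host support are present. */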
8702 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8703 #if defined(TARGET_NR_epoll_wait)
8704 case TARGET_NR_epoll_wait:
8705 #endif
8706 #if defined(IMPLEMENT_EPOLL_PWAIT)
8707 case TARGET_NR_epoll_pwait:
8708 #endif
8709 {
8710 struct target_epoll_event *target_ep;
8711 struct epoll_event *ep;
8712 int epfd = arg1;
8713 int maxevents = arg3;
8714 int timeout = arg4;
8715
8716 target_ep = lock_user(VERIFY_WRITE, arg2,
8717 maxevents * sizeof(struct target_epoll_event), 1);
8718 if (!target_ep) {
8719 goto efault;
8720 }
8721
8722 ep = alloca(maxevents * sizeof(struct epoll_event));
8723
8724 switch (num) {
8725 #if defined(IMPLEMENT_EPOLL_PWAIT)
8726 case TARGET_NR_epoll_pwait:
8727 {
8728 target_sigset_t *target_set;
8729 sigset_t _set, *set = &_set;
8730
8731 if (arg5) {
8732 target_set = lock_user(VERIFY_READ, arg5,
8733 sizeof(target_sigset_t), 1);
8734 if (!target_set) {
8735 unlock_user(target_ep, arg2, 0);
8736 goto efault;
8737 }
8738 target_to_host_sigset(set, target_set);
8739 unlock_user(target_set, arg5, 0);
8740 } else {
8741 set = NULL;
8742 }
8743
8744 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8745 break;
8746 }
8747 #endif
8748 #if defined(TARGET_NR_epoll_wait)
8749 case TARGET_NR_epoll_wait:
8750 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8751 break;
8752 #endif
8753 default:
8754 ret = -TARGET_ENOSYS;
8755 }
8756 if (!is_error(ret)) {
8757 int i;
8758 for (i = 0; i < ret; i++) {
8759 target_ep[i].events = tswap32(ep[i].events);
8760 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8761 }
8762 }
8763 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8764 break;
8765 }
8766 #endif
8767 #endif
8768 #ifdef TARGET_NR_prlimit64
8769 case TARGET_NR_prlimit64:
8770 {
8771 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8772 struct target_rlimit64 *target_rnew, *target_rold;
8773 struct host_rlimit64 rnew, rold, *rnewp = 0;
8774 if (arg3) {
8775 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8776 goto efault;
8777 }
8778 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8779 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8780 unlock_user_struct(target_rnew, arg3, 0);
8781 rnewp = &rnew;
8782 }
8783
8784 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8785 if (!is_error(ret) && arg4) {
8786 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8787 goto efault;
8788 }
8789 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8790 target_rold->rlim_max = tswap64(rold.rlim_max);
8791 unlock_user_struct(target_rold, arg4, 1);
8792 }
8793 break;
8794 }
8795 #endif
8796 default:
8797 unimplemented:
8798 gemu_log("qemu: Unsupported syscall: %d\n", num);
8799 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8800 unimplemented_nowarn:
8801 #endif
8802 ret = -TARGET_ENOSYS;
8803 break;
8804 }
8805 fail:
8806 #ifdef DEBUG
8807 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8808 #endif
8809 if(do_strace)
8810 print_syscall_ret(num, ret);
8811 return ret;
8812 efault:
8813 ret = -TARGET_EFAULT;
8814 goto fail;
8815 }