1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include "qemu-common.h"
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
73 #ifdef CONFIG_ATTR
74 #include "qemu-xattr.h"
75 #endif
76
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
83
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/utsname.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include "linux_loop.h"
100 #include "cpu-uname.h"
101
102 #include "qemu.h"
103
104 #if defined(CONFIG_USE_NPTL)
105 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
106 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
107 #else
108 /* XXX: Hardcode the above values. */
109 #define CLONE_NPTL_FLAGS2 0
110 #endif
111
112 //#define DEBUG
113
114 //#include <linux/msdos_fs.h>
115 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
116 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
117
118
119 #undef _syscall0
120 #undef _syscall1
121 #undef _syscall2
122 #undef _syscall3
123 #undef _syscall4
124 #undef _syscall5
125 #undef _syscall6
126
127 #define _syscall0(type,name) \
128 static type name (void) \
129 { \
130 return syscall(__NR_##name); \
131 }
132
133 #define _syscall1(type,name,type1,arg1) \
134 static type name (type1 arg1) \
135 { \
136 return syscall(__NR_##name, arg1); \
137 }
138
139 #define _syscall2(type,name,type1,arg1,type2,arg2) \
140 static type name (type1 arg1,type2 arg2) \
141 { \
142 return syscall(__NR_##name, arg1, arg2); \
143 }
144
145 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
146 static type name (type1 arg1,type2 arg2,type3 arg3) \
147 { \
148 return syscall(__NR_##name, arg1, arg2, arg3); \
149 }
150
151 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
152 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
153 { \
154 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
155 }
156
157 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
158 type5,arg5) \
159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
160 { \
161 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
162 }
163
164
165 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
166 type5,arg5,type6,arg6) \
167 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
168 type6 arg6) \
169 { \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
171 }
172
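/* Illustrative example (not part of the original source): the sys_* wrappers
 * further below are generated by these macros, e.g.
 *     _syscall2(int, sys_tkill, int, tid, int, sig)
 * expands to
 *     static int sys_tkill(int tid, int sig)
 *     { return syscall(__NR_sys_tkill, tid, sig); }
 * which is why every sys_* wrapper needs a matching __NR_sys_* alias such as
 * the ones defined just below. */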
173
174 #define __NR_sys_uname __NR_uname
175 #define __NR_sys_faccessat __NR_faccessat
176 #define __NR_sys_fchmodat __NR_fchmodat
177 #define __NR_sys_fchownat __NR_fchownat
178 #define __NR_sys_fstatat64 __NR_fstatat64
179 #define __NR_sys_futimesat __NR_futimesat
180 #define __NR_sys_getcwd1 __NR_getcwd
181 #define __NR_sys_getdents __NR_getdents
182 #define __NR_sys_getdents64 __NR_getdents64
183 #define __NR_sys_getpriority __NR_getpriority
184 #define __NR_sys_linkat __NR_linkat
185 #define __NR_sys_mkdirat __NR_mkdirat
186 #define __NR_sys_mknodat __NR_mknodat
187 #define __NR_sys_newfstatat __NR_newfstatat
188 #define __NR_sys_openat __NR_openat
189 #define __NR_sys_readlinkat __NR_readlinkat
190 #define __NR_sys_renameat __NR_renameat
191 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
192 #define __NR_sys_symlinkat __NR_symlinkat
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_tgkill __NR_tgkill
195 #define __NR_sys_tkill __NR_tkill
196 #define __NR_sys_unlinkat __NR_unlinkat
197 #define __NR_sys_utimensat __NR_utimensat
198 #define __NR_sys_futex __NR_futex
199 #define __NR_sys_inotify_init __NR_inotify_init
200 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
201 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
202
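/* On these 64-bit hosts there is no separate llseek syscall number; map the
 * _llseek wrapper below onto the plain lseek syscall instead. */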
203 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
204 defined(__s390x__)
205 #define __NR__llseek __NR_lseek
206 #endif
207
208 #ifdef __NR_gettid
209 _syscall0(int, gettid)
210 #else
211 /* This is a replacement for the host gettid() and must return a host
212 errno. */
213 static int gettid(void) {
214 return -ENOSYS;
215 }
216 #endif
217 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
218 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
220 #endif
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
223 loff_t *, res, uint, wh);
224 #endif
225 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
226 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
227 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
228 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
229 #endif
230 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
231 _syscall2(int,sys_tkill,int,tid,int,sig)
232 #endif
233 #ifdef __NR_exit_group
234 _syscall1(int,exit_group,int,error_code)
235 #endif
236 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
237 _syscall1(int,set_tid_address,int *,tidptr)
238 #endif
239 #if defined(CONFIG_USE_NPTL)
240 #if defined(TARGET_NR_futex) && defined(__NR_futex)
241 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
242 const struct timespec *,timeout,int *,uaddr2,int,val3)
243 #endif
244 #endif
245 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
246 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
247 unsigned long *, user_mask_ptr);
248 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
249 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
250 unsigned long *, user_mask_ptr);
251 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
252 void *, arg);
253
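/* Each entry below is { target mask, target bits, host mask, host bits }:
 * a guest open(2) flag is matched under the target mask/bits and rewritten to
 * the host bits (and the reverse for host-to-target translation). */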
254 static bitmask_transtbl fcntl_flags_tbl[] = {
255 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
256 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
257 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
258 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
259 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
260 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
261 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
262 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
263 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
264 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
265 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
266 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
267 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
268 #if defined(O_DIRECT)
269 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
270 #endif
271 #if defined(O_NOATIME)
272 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
273 #endif
274 #if defined(O_CLOEXEC)
275 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
276 #endif
277 #if defined(O_PATH)
278 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
279 #endif
280 /* Don't terminate the list prematurely on 64-bit host+guest. */
281 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
282 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
283 #endif
284 { 0, 0, 0, 0 }
285 };
286
287 #define COPY_UTSNAME_FIELD(dest, src) \
288 do { \
289 /* __NEW_UTS_LEN doesn't include terminating null */ \
290 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
291 (dest)[__NEW_UTS_LEN] = '\0'; \
292 } while (0)
293
294 static int sys_uname(struct new_utsname *buf)
295 {
296 struct utsname uts_buf;
297
298 if (uname(&uts_buf) < 0)
299 return (-1);
300
301 /*
302 * Just in case these have some differences, we
303 * translate utsname to new_utsname (which is the
304  * struct the Linux kernel uses).
305 */
306
307 memset(buf, 0, sizeof(*buf));
308 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
309 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
310 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
311 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
312 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
313 #ifdef _GNU_SOURCE
314 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
315 #endif
316 return (0);
317
318 #undef COPY_UTSNAME_FIELD
319 }
320
321 static int sys_getcwd1(char *buf, size_t size)
322 {
323 if (getcwd(buf, size) == NULL) {
324 /* getcwd() sets errno */
325 return (-1);
326 }
327 return strlen(buf)+1;
328 }
329
330 #ifdef CONFIG_ATFILE
331 /*
332  * The host system seems to have the atfile syscall stubs available. We
333  * now enable them one by one as specified by the target's syscall_nr.h.
334 */
335
336 #ifdef TARGET_NR_faccessat
337 static int sys_faccessat(int dirfd, const char *pathname, int mode)
338 {
339 return (faccessat(dirfd, pathname, mode, 0));
340 }
341 #endif
342 #ifdef TARGET_NR_fchmodat
343 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
344 {
345 return (fchmodat(dirfd, pathname, mode, 0));
346 }
347 #endif
348 #if defined(TARGET_NR_fchownat)
349 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
350 gid_t group, int flags)
351 {
352 return (fchownat(dirfd, pathname, owner, group, flags));
353 }
354 #endif
355 #ifdef __NR_fstatat64
356 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
357 int flags)
358 {
359 return (fstatat(dirfd, pathname, buf, flags));
360 }
361 #endif
362 #ifdef __NR_newfstatat
363 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
364 int flags)
365 {
366 return (fstatat(dirfd, pathname, buf, flags));
367 }
368 #endif
369 #ifdef TARGET_NR_futimesat
370 static int sys_futimesat(int dirfd, const char *pathname,
371 const struct timeval times[2])
372 {
373 return (futimesat(dirfd, pathname, times));
374 }
375 #endif
376 #ifdef TARGET_NR_linkat
377 static int sys_linkat(int olddirfd, const char *oldpath,
378 int newdirfd, const char *newpath, int flags)
379 {
380 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
381 }
382 #endif
383 #ifdef TARGET_NR_mkdirat
384 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
385 {
386 return (mkdirat(dirfd, pathname, mode));
387 }
388 #endif
389 #ifdef TARGET_NR_mknodat
390 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
391 dev_t dev)
392 {
393 return (mknodat(dirfd, pathname, mode, dev));
394 }
395 #endif
396 #ifdef TARGET_NR_openat
397 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
398 {
399 /*
400  * open(2) takes an extra 'mode' parameter when called with
401  * the O_CREAT flag.
402 */
403 if ((flags & O_CREAT) != 0) {
404 return (openat(dirfd, pathname, flags, mode));
405 }
406 return (openat(dirfd, pathname, flags));
407 }
408 #endif
409 #ifdef TARGET_NR_readlinkat
410 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
411 {
412 return (readlinkat(dirfd, pathname, buf, bufsiz));
413 }
414 #endif
415 #ifdef TARGET_NR_renameat
416 static int sys_renameat(int olddirfd, const char *oldpath,
417 int newdirfd, const char *newpath)
418 {
419 return (renameat(olddirfd, oldpath, newdirfd, newpath));
420 }
421 #endif
422 #ifdef TARGET_NR_symlinkat
423 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
424 {
425 return (symlinkat(oldpath, newdirfd, newpath));
426 }
427 #endif
428 #ifdef TARGET_NR_unlinkat
429 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
430 {
431 return (unlinkat(dirfd, pathname, flags));
432 }
433 #endif
434 #else /* !CONFIG_ATFILE */
435
436 /*
437 * Try direct syscalls instead
438 */
439 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
440 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
441 #endif
442 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
443 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
444 #endif
445 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
446 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
447 uid_t,owner,gid_t,group,int,flags)
448 #endif
449 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
450 defined(__NR_fstatat64)
451 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
452 struct stat *,buf,int,flags)
453 #endif
454 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
455 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
456 const struct timeval *,times)
457 #endif
458 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
459 defined(__NR_newfstatat)
460 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
461 struct stat *,buf,int,flags)
462 #endif
463 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
464 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
465 int,newdirfd,const char *,newpath,int,flags)
466 #endif
467 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
468 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
469 #endif
470 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
471 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
472 mode_t,mode,dev_t,dev)
473 #endif
474 #if defined(TARGET_NR_openat) && defined(__NR_openat)
475 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
476 #endif
477 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
478 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
479 char *,buf,size_t,bufsize)
480 #endif
481 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
482 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
483 int,newdirfd,const char *,newpath)
484 #endif
485 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
486 _syscall3(int,sys_symlinkat,const char *,oldpath,
487 int,newdirfd,const char *,newpath)
488 #endif
489 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
490 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
491 #endif
492
493 #endif /* CONFIG_ATFILE */
494
495 #ifdef CONFIG_UTIMENSAT
496 static int sys_utimensat(int dirfd, const char *pathname,
497 const struct timespec times[2], int flags)
498 {
499 if (pathname == NULL)
500 return futimens(dirfd, times);
501 else
502 return utimensat(dirfd, pathname, times, flags);
503 }
504 #else
505 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
506 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
507 const struct timespec *,tsp,int,flags)
508 #endif
509 #endif /* CONFIG_UTIMENSAT */
510
511 #ifdef CONFIG_INOTIFY
512 #include <sys/inotify.h>
513
514 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
515 static int sys_inotify_init(void)
516 {
517 return (inotify_init());
518 }
519 #endif
520 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
521 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
522 {
523 return (inotify_add_watch(fd, pathname, mask));
524 }
525 #endif
526 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
527 static int sys_inotify_rm_watch(int fd, int32_t wd)
528 {
529 return (inotify_rm_watch(fd, wd));
530 }
531 #endif
532 #ifdef CONFIG_INOTIFY1
533 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
534 static int sys_inotify_init1(int flags)
535 {
536 return (inotify_init1(flags));
537 }
538 #endif
539 #endif
540 #else
541 /* Userspace can usually survive runtime without inotify */
542 #undef TARGET_NR_inotify_init
543 #undef TARGET_NR_inotify_init1
544 #undef TARGET_NR_inotify_add_watch
545 #undef TARGET_NR_inotify_rm_watch
546 #endif /* CONFIG_INOTIFY */
547
548 #if defined(TARGET_NR_ppoll)
549 #ifndef __NR_ppoll
550 # define __NR_ppoll -1
551 #endif
552 #define __NR_sys_ppoll __NR_ppoll
553 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
554 struct timespec *, timeout, const __sigset_t *, sigmask,
555 size_t, sigsetsize)
556 #endif
557
558 #if defined(TARGET_NR_pselect6)
559 #ifndef __NR_pselect6
560 # define __NR_pselect6 -1
561 #endif
562 #define __NR_sys_pselect6 __NR_pselect6
563 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
564 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
565 #endif
566
567 #if defined(TARGET_NR_prlimit64)
568 #ifndef __NR_prlimit64
569 # define __NR_prlimit64 -1
570 #endif
571 #define __NR_sys_prlimit64 __NR_prlimit64
572 /* The glibc rlimit structure may not be that used by the underlying syscall */
573 struct host_rlimit64 {
574 uint64_t rlim_cur;
575 uint64_t rlim_max;
576 };
577 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
578 const struct host_rlimit64 *, new_limit,
579 struct host_rlimit64 *, old_limit)
580 #endif
581
582 extern int personality(int);
583 extern int flock(int, int);
584 extern int setfsuid(int);
585 extern int setfsgid(int);
586 extern int setgroups(int, gid_t *);
587
588 /* ARM EABI and MIPS expect 64-bit types aligned even on pairs of registers */
589 #ifdef TARGET_ARM
590 static inline int regpairs_aligned(void *cpu_env) {
591 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
592 }
593 #elif defined(TARGET_MIPS)
594 static inline int regpairs_aligned(void *cpu_env) { return 1; }
595 #else
596 static inline int regpairs_aligned(void *cpu_env) { return 0; }
597 #endif
598
599 #define ERRNO_TABLE_SIZE 1200
600
601 /* target_to_host_errno_table[] is initialized from
602 * host_to_target_errno_table[] in syscall_init(). */
603 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
604 };
605
606 /*
607 * This list is the union of errno values overridden in asm-<arch>/errno.h
608 * minus the errnos that are not actually generic to all archs.
609 */
610 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
611 [EIDRM] = TARGET_EIDRM,
612 [ECHRNG] = TARGET_ECHRNG,
613 [EL2NSYNC] = TARGET_EL2NSYNC,
614 [EL3HLT] = TARGET_EL3HLT,
615 [EL3RST] = TARGET_EL3RST,
616 [ELNRNG] = TARGET_ELNRNG,
617 [EUNATCH] = TARGET_EUNATCH,
618 [ENOCSI] = TARGET_ENOCSI,
619 [EL2HLT] = TARGET_EL2HLT,
620 [EDEADLK] = TARGET_EDEADLK,
621 [ENOLCK] = TARGET_ENOLCK,
622 [EBADE] = TARGET_EBADE,
623 [EBADR] = TARGET_EBADR,
624 [EXFULL] = TARGET_EXFULL,
625 [ENOANO] = TARGET_ENOANO,
626 [EBADRQC] = TARGET_EBADRQC,
627 [EBADSLT] = TARGET_EBADSLT,
628 [EBFONT] = TARGET_EBFONT,
629 [ENOSTR] = TARGET_ENOSTR,
630 [ENODATA] = TARGET_ENODATA,
631 [ETIME] = TARGET_ETIME,
632 [ENOSR] = TARGET_ENOSR,
633 [ENONET] = TARGET_ENONET,
634 [ENOPKG] = TARGET_ENOPKG,
635 [EREMOTE] = TARGET_EREMOTE,
636 [ENOLINK] = TARGET_ENOLINK,
637 [EADV] = TARGET_EADV,
638 [ESRMNT] = TARGET_ESRMNT,
639 [ECOMM] = TARGET_ECOMM,
640 [EPROTO] = TARGET_EPROTO,
641 [EDOTDOT] = TARGET_EDOTDOT,
642 [EMULTIHOP] = TARGET_EMULTIHOP,
643 [EBADMSG] = TARGET_EBADMSG,
644 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
645 [EOVERFLOW] = TARGET_EOVERFLOW,
646 [ENOTUNIQ] = TARGET_ENOTUNIQ,
647 [EBADFD] = TARGET_EBADFD,
648 [EREMCHG] = TARGET_EREMCHG,
649 [ELIBACC] = TARGET_ELIBACC,
650 [ELIBBAD] = TARGET_ELIBBAD,
651 [ELIBSCN] = TARGET_ELIBSCN,
652 [ELIBMAX] = TARGET_ELIBMAX,
653 [ELIBEXEC] = TARGET_ELIBEXEC,
654 [EILSEQ] = TARGET_EILSEQ,
655 [ENOSYS] = TARGET_ENOSYS,
656 [ELOOP] = TARGET_ELOOP,
657 [ERESTART] = TARGET_ERESTART,
658 [ESTRPIPE] = TARGET_ESTRPIPE,
659 [ENOTEMPTY] = TARGET_ENOTEMPTY,
660 [EUSERS] = TARGET_EUSERS,
661 [ENOTSOCK] = TARGET_ENOTSOCK,
662 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
663 [EMSGSIZE] = TARGET_EMSGSIZE,
664 [EPROTOTYPE] = TARGET_EPROTOTYPE,
665 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
666 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
667 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
668 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
669 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
670 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
671 [EADDRINUSE] = TARGET_EADDRINUSE,
672 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
673 [ENETDOWN] = TARGET_ENETDOWN,
674 [ENETUNREACH] = TARGET_ENETUNREACH,
675 [ENETRESET] = TARGET_ENETRESET,
676 [ECONNABORTED] = TARGET_ECONNABORTED,
677 [ECONNRESET] = TARGET_ECONNRESET,
678 [ENOBUFS] = TARGET_ENOBUFS,
679 [EISCONN] = TARGET_EISCONN,
680 [ENOTCONN] = TARGET_ENOTCONN,
681 [EUCLEAN] = TARGET_EUCLEAN,
682 [ENOTNAM] = TARGET_ENOTNAM,
683 [ENAVAIL] = TARGET_ENAVAIL,
684 [EISNAM] = TARGET_EISNAM,
685 [EREMOTEIO] = TARGET_EREMOTEIO,
686 [ESHUTDOWN] = TARGET_ESHUTDOWN,
687 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
688 [ETIMEDOUT] = TARGET_ETIMEDOUT,
689 [ECONNREFUSED] = TARGET_ECONNREFUSED,
690 [EHOSTDOWN] = TARGET_EHOSTDOWN,
691 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
692 [EALREADY] = TARGET_EALREADY,
693 [EINPROGRESS] = TARGET_EINPROGRESS,
694 [ESTALE] = TARGET_ESTALE,
695 [ECANCELED] = TARGET_ECANCELED,
696 [ENOMEDIUM] = TARGET_ENOMEDIUM,
697 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
698 #ifdef ENOKEY
699 [ENOKEY] = TARGET_ENOKEY,
700 #endif
701 #ifdef EKEYEXPIRED
702 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
703 #endif
704 #ifdef EKEYREVOKED
705 [EKEYREVOKED] = TARGET_EKEYREVOKED,
706 #endif
707 #ifdef EKEYREJECTED
708 [EKEYREJECTED] = TARGET_EKEYREJECTED,
709 #endif
710 #ifdef EOWNERDEAD
711 [EOWNERDEAD] = TARGET_EOWNERDEAD,
712 #endif
713 #ifdef ENOTRECOVERABLE
714 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
715 #endif
716 };
717
718 static inline int host_to_target_errno(int err)
719 {
720 if(host_to_target_errno_table[err])
721 return host_to_target_errno_table[err];
722 return err;
723 }
724
725 static inline int target_to_host_errno(int err)
726 {
727 if (target_to_host_errno_table[err])
728 return target_to_host_errno_table[err];
729 return err;
730 }
731
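/* Convert the result of a host libc/syscall wrapper into the convention used
 * throughout this file: on failure (-1), return the negated target errno;
 * otherwise pass the value through unchanged. */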
732 static inline abi_long get_errno(abi_long ret)
733 {
734 if (ret == -1)
735 return -host_to_target_errno(errno);
736 else
737 return ret;
738 }
739
740 static inline int is_error(abi_long ret)
741 {
742 return (abi_ulong)ret >= (abi_ulong)(-4096);
743 }
744
745 char *target_strerror(int err)
746 {
747 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
748 return NULL;
749 }
750 return strerror(target_to_host_errno(err));
751 }
752
753 static abi_ulong target_brk;
754 static abi_ulong target_original_brk;
755 static abi_ulong brk_page;
756
757 void target_set_brk(abi_ulong new_brk)
758 {
759 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
760 brk_page = HOST_PAGE_ALIGN(target_brk);
761 }
762
763 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
764 #define DEBUGF_BRK(message, args...)
765
766 /* do_brk() must return target values and target errnos. */
767 abi_long do_brk(abi_ulong new_brk)
768 {
769 abi_long mapped_addr;
770 int new_alloc_size;
771
772 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
773
774 if (!new_brk) {
775 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
776 return target_brk;
777 }
778 if (new_brk < target_original_brk) {
779 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
780 target_brk);
781 return target_brk;
782 }
783
784 /* If the new brk is less than the highest page reserved to the
785 * target heap allocation, set it and we're almost done... */
786 if (new_brk <= brk_page) {
787 /* Heap contents are initialized to zero, as for anonymous
788 * mapped pages. */
789 if (new_brk > target_brk) {
790 memset(g2h(target_brk), 0, new_brk - target_brk);
791 }
792 target_brk = new_brk;
793 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
794 return target_brk;
795 }
796
797 /* We need to allocate more memory after the brk... Note that
798 * we don't use MAP_FIXED because that will map over the top of
799 * any existing mapping (like the one with the host libc or qemu
800 * itself); instead we treat "mapped but at wrong address" as
801 * a failure and unmap again.
802 */
803 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
804 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
805 PROT_READ|PROT_WRITE,
806 MAP_ANON|MAP_PRIVATE, 0, 0));
807
808 if (mapped_addr == brk_page) {
809 /* Heap contents are initialized to zero, as for anonymous
810 * mapped pages. Technically the new pages are already
811 * initialized to zero since they *are* anonymous mapped
812  * pages; however, we have to take care with the contents that
813  * come from the remaining part of the previous page: it may
814  * contain garbage data due to previous heap usage (grown
815  * then shrunk). */
816 memset(g2h(target_brk), 0, brk_page - target_brk);
817
818 target_brk = new_brk;
819 brk_page = HOST_PAGE_ALIGN(target_brk);
820 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
821 target_brk);
822 return target_brk;
823 } else if (mapped_addr != -1) {
824 /* Mapped but at wrong address, meaning there wasn't actually
825 * enough space for this brk.
826 */
827 target_munmap(mapped_addr, new_alloc_size);
828 mapped_addr = -1;
829 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
830 }
831 else {
832 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
833 }
834
835 #if defined(TARGET_ALPHA)
836 /* We (partially) emulate OSF/1 on Alpha, which requires we
837 return a proper errno, not an unchanged brk value. */
838 return -TARGET_ENOMEM;
839 #endif
840 /* For everything else, return the previous break. */
841 return target_brk;
842 }
843
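/* fd_set helpers: the guest supplies its fd_set as an array of abi_ulong words
 * in target byte order, so the bitmaps are repacked bit by bit when crossing
 * the guest/host boundary. */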
844 static inline abi_long copy_from_user_fdset(fd_set *fds,
845 abi_ulong target_fds_addr,
846 int n)
847 {
848 int i, nw, j, k;
849 abi_ulong b, *target_fds;
850
851 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
852 if (!(target_fds = lock_user(VERIFY_READ,
853 target_fds_addr,
854 sizeof(abi_ulong) * nw,
855 1)))
856 return -TARGET_EFAULT;
857
858 FD_ZERO(fds);
859 k = 0;
860 for (i = 0; i < nw; i++) {
861 /* grab the abi_ulong */
862 __get_user(b, &target_fds[i]);
863 for (j = 0; j < TARGET_ABI_BITS; j++) {
864 /* check the bit inside the abi_ulong */
865 if ((b >> j) & 1)
866 FD_SET(k, fds);
867 k++;
868 }
869 }
870
871 unlock_user(target_fds, target_fds_addr, 0);
872
873 return 0;
874 }
875
876 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
877 abi_ulong target_fds_addr,
878 int n)
879 {
880 if (target_fds_addr) {
881 if (copy_from_user_fdset(fds, target_fds_addr, n))
882 return -TARGET_EFAULT;
883 *fds_ptr = fds;
884 } else {
885 *fds_ptr = NULL;
886 }
887 return 0;
888 }
889
890 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
891 const fd_set *fds,
892 int n)
893 {
894 int i, nw, j, k;
895 abi_long v;
896 abi_ulong *target_fds;
897
898 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
899 if (!(target_fds = lock_user(VERIFY_WRITE,
900 target_fds_addr,
901 sizeof(abi_ulong) * nw,
902 0)))
903 return -TARGET_EFAULT;
904
905 k = 0;
906 for (i = 0; i < nw; i++) {
907 v = 0;
908 for (j = 0; j < TARGET_ABI_BITS; j++) {
909 v |= ((FD_ISSET(k, fds) != 0) << j);
910 k++;
911 }
912 __put_user(v, &target_fds[i]);
913 }
914
915 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
916
917 return 0;
918 }
919
920 #if defined(__alpha__)
921 #define HOST_HZ 1024
922 #else
923 #define HOST_HZ 100
924 #endif
925
926 static inline abi_long host_to_target_clock_t(long ticks)
927 {
928 #if HOST_HZ == TARGET_HZ
929 return ticks;
930 #else
931 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
932 #endif
933 }
934
935 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
936 const struct rusage *rusage)
937 {
938 struct target_rusage *target_rusage;
939
940 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
941 return -TARGET_EFAULT;
942 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
943 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
944 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
945 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
946 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
947 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
948 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
949 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
950 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
951 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
952 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
953 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
954 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
955 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
956 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
957 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
958 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
959 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
960 unlock_user_struct(target_rusage, target_addr, 1);
961
962 return 0;
963 }
964
965 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
966 {
967 abi_ulong target_rlim_swap;
968 rlim_t result;
969
970 target_rlim_swap = tswapal(target_rlim);
971 if (target_rlim_swap == TARGET_RLIM_INFINITY)
972 return RLIM_INFINITY;
973
974 result = target_rlim_swap;
975 if (target_rlim_swap != (rlim_t)result)
976 return RLIM_INFINITY;
977
978 return result;
979 }
980
981 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
982 {
983 abi_ulong target_rlim_swap;
984 abi_ulong result;
985
986 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
987 target_rlim_swap = TARGET_RLIM_INFINITY;
988 else
989 target_rlim_swap = rlim;
990 result = tswapal(target_rlim_swap);
991
992 return result;
993 }
994
995 static inline int target_to_host_resource(int code)
996 {
997 switch (code) {
998 case TARGET_RLIMIT_AS:
999 return RLIMIT_AS;
1000 case TARGET_RLIMIT_CORE:
1001 return RLIMIT_CORE;
1002 case TARGET_RLIMIT_CPU:
1003 return RLIMIT_CPU;
1004 case TARGET_RLIMIT_DATA:
1005 return RLIMIT_DATA;
1006 case TARGET_RLIMIT_FSIZE:
1007 return RLIMIT_FSIZE;
1008 case TARGET_RLIMIT_LOCKS:
1009 return RLIMIT_LOCKS;
1010 case TARGET_RLIMIT_MEMLOCK:
1011 return RLIMIT_MEMLOCK;
1012 case TARGET_RLIMIT_MSGQUEUE:
1013 return RLIMIT_MSGQUEUE;
1014 case TARGET_RLIMIT_NICE:
1015 return RLIMIT_NICE;
1016 case TARGET_RLIMIT_NOFILE:
1017 return RLIMIT_NOFILE;
1018 case TARGET_RLIMIT_NPROC:
1019 return RLIMIT_NPROC;
1020 case TARGET_RLIMIT_RSS:
1021 return RLIMIT_RSS;
1022 case TARGET_RLIMIT_RTPRIO:
1023 return RLIMIT_RTPRIO;
1024 case TARGET_RLIMIT_SIGPENDING:
1025 return RLIMIT_SIGPENDING;
1026 case TARGET_RLIMIT_STACK:
1027 return RLIMIT_STACK;
1028 default:
1029 return code;
1030 }
1031 }
1032
1033 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1034 abi_ulong target_tv_addr)
1035 {
1036 struct target_timeval *target_tv;
1037
1038 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1039 return -TARGET_EFAULT;
1040
1041 __get_user(tv->tv_sec, &target_tv->tv_sec);
1042 __get_user(tv->tv_usec, &target_tv->tv_usec);
1043
1044 unlock_user_struct(target_tv, target_tv_addr, 0);
1045
1046 return 0;
1047 }
1048
1049 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1050 const struct timeval *tv)
1051 {
1052 struct target_timeval *target_tv;
1053
1054 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1055 return -TARGET_EFAULT;
1056
1057 __put_user(tv->tv_sec, &target_tv->tv_sec);
1058 __put_user(tv->tv_usec, &target_tv->tv_usec);
1059
1060 unlock_user_struct(target_tv, target_tv_addr, 1);
1061
1062 return 0;
1063 }
1064
1065 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1066 #include <mqueue.h>
1067
1068 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1069 abi_ulong target_mq_attr_addr)
1070 {
1071 struct target_mq_attr *target_mq_attr;
1072
1073 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1074 target_mq_attr_addr, 1))
1075 return -TARGET_EFAULT;
1076
1077 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1078 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1079 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1080 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1081
1082 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1083
1084 return 0;
1085 }
1086
1087 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1088 const struct mq_attr *attr)
1089 {
1090 struct target_mq_attr *target_mq_attr;
1091
1092 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1093 target_mq_attr_addr, 0))
1094 return -TARGET_EFAULT;
1095
1096 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1097 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1098 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1099 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1100
1101 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1102
1103 return 0;
1104 }
1105 #endif
1106
1107 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1108 /* do_select() must return target values and target errnos. */
1109 static abi_long do_select(int n,
1110 abi_ulong rfd_addr, abi_ulong wfd_addr,
1111 abi_ulong efd_addr, abi_ulong target_tv_addr)
1112 {
1113 fd_set rfds, wfds, efds;
1114 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1115 struct timeval tv, *tv_ptr;
1116 abi_long ret;
1117
1118 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1119 if (ret) {
1120 return ret;
1121 }
1122 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1123 if (ret) {
1124 return ret;
1125 }
1126 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1127 if (ret) {
1128 return ret;
1129 }
1130
1131 if (target_tv_addr) {
1132 if (copy_from_user_timeval(&tv, target_tv_addr))
1133 return -TARGET_EFAULT;
1134 tv_ptr = &tv;
1135 } else {
1136 tv_ptr = NULL;
1137 }
1138
1139 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1140
1141 if (!is_error(ret)) {
1142 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1143 return -TARGET_EFAULT;
1144 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1145 return -TARGET_EFAULT;
1146 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1147 return -TARGET_EFAULT;
1148
1149 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1150 return -TARGET_EFAULT;
1151 }
1152
1153 return ret;
1154 }
1155 #endif
1156
1157 static abi_long do_pipe2(int host_pipe[], int flags)
1158 {
1159 #ifdef CONFIG_PIPE2
1160 return pipe2(host_pipe, flags);
1161 #else
1162 return -ENOSYS;
1163 #endif
1164 }
1165
1166 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1167 int flags, int is_pipe2)
1168 {
1169 int host_pipe[2];
1170 abi_long ret;
1171 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1172
1173 if (is_error(ret))
1174 return get_errno(ret);
1175
1176 /* Several targets have special calling conventions for the original
1177 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1178 if (!is_pipe2) {
1179 #if defined(TARGET_ALPHA)
1180 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1181 return host_pipe[0];
1182 #elif defined(TARGET_MIPS)
1183 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1184 return host_pipe[0];
1185 #elif defined(TARGET_SH4)
1186 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1187 return host_pipe[0];
1188 #endif
1189 }
1190
1191 if (put_user_s32(host_pipe[0], pipedes)
1192 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1193 return -TARGET_EFAULT;
1194 return get_errno(ret);
1195 }
1196
1197 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1198 abi_ulong target_addr,
1199 socklen_t len)
1200 {
1201 struct target_ip_mreqn *target_smreqn;
1202
1203 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1204 if (!target_smreqn)
1205 return -TARGET_EFAULT;
1206 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1207 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1208 if (len == sizeof(struct target_ip_mreqn))
1209 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1210 unlock_user(target_smreqn, target_addr, 0);
1211
1212 return 0;
1213 }
1214
1215 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1216 abi_ulong target_addr,
1217 socklen_t len)
1218 {
1219 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1220 sa_family_t sa_family;
1221 struct target_sockaddr *target_saddr;
1222
1223 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1224 if (!target_saddr)
1225 return -TARGET_EFAULT;
1226
1227 sa_family = tswap16(target_saddr->sa_family);
1228
1229     /* Oops. The caller might send an incomplete sun_path; sun_path
1230 * must be terminated by \0 (see the manual page), but
1231 * unfortunately it is quite common to specify sockaddr_un
1232 * length as "strlen(x->sun_path)" while it should be
1233 * "strlen(...) + 1". We'll fix that here if needed.
1234      * The Linux kernel has a similar feature.
1235 */
1236
1237 if (sa_family == AF_UNIX) {
1238 if (len < unix_maxlen && len > 0) {
1239 char *cp = (char*)target_saddr;
1240
1241 if ( cp[len-1] && !cp[len] )
1242 len++;
1243 }
1244 if (len > unix_maxlen)
1245 len = unix_maxlen;
1246 }
1247
1248 memcpy(addr, target_saddr, len);
1249 addr->sa_family = sa_family;
1250 unlock_user(target_saddr, target_addr, 0);
1251
1252 return 0;
1253 }
1254
1255 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1256 struct sockaddr *addr,
1257 socklen_t len)
1258 {
1259 struct target_sockaddr *target_saddr;
1260
1261 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1262 if (!target_saddr)
1263 return -TARGET_EFAULT;
1264 memcpy(target_saddr, addr, len);
1265 target_saddr->sa_family = tswap16(addr->sa_family);
1266 unlock_user(target_saddr, target_addr, len);
1267
1268 return 0;
1269 }
1270
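/* The two helpers below convert ancillary (control) data between the target
 * and host msghdr layouts. Only SCM_RIGHTS payloads (arrays of file
 * descriptors) are converted element by element; other cmsg types are copied
 * through unchanged after a warning. */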
1271 /* ??? Should this also swap msgh->name? */
1272 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1273 struct target_msghdr *target_msgh)
1274 {
1275 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1276 abi_long msg_controllen;
1277 abi_ulong target_cmsg_addr;
1278 struct target_cmsghdr *target_cmsg;
1279 socklen_t space = 0;
1280
1281 msg_controllen = tswapal(target_msgh->msg_controllen);
1282 if (msg_controllen < sizeof (struct target_cmsghdr))
1283 goto the_end;
1284 target_cmsg_addr = tswapal(target_msgh->msg_control);
1285 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1286 if (!target_cmsg)
1287 return -TARGET_EFAULT;
1288
1289 while (cmsg && target_cmsg) {
1290 void *data = CMSG_DATA(cmsg);
1291 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1292
1293 int len = tswapal(target_cmsg->cmsg_len)
1294 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1295
1296 space += CMSG_SPACE(len);
1297 if (space > msgh->msg_controllen) {
1298 space -= CMSG_SPACE(len);
1299 gemu_log("Host cmsg overflow\n");
1300 break;
1301 }
1302
1303 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1304 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1305 cmsg->cmsg_len = CMSG_LEN(len);
1306
1307 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1308 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1309 memcpy(data, target_data, len);
1310 } else {
1311 int *fd = (int *)data;
1312 int *target_fd = (int *)target_data;
1313 int i, numfds = len / sizeof(int);
1314
1315 for (i = 0; i < numfds; i++)
1316 fd[i] = tswap32(target_fd[i]);
1317 }
1318
1319 cmsg = CMSG_NXTHDR(msgh, cmsg);
1320 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1321 }
1322 unlock_user(target_cmsg, target_cmsg_addr, 0);
1323 the_end:
1324 msgh->msg_controllen = space;
1325 return 0;
1326 }
1327
1328 /* ??? Should this also swap msgh->name? */
1329 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1330 struct msghdr *msgh)
1331 {
1332 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1333 abi_long msg_controllen;
1334 abi_ulong target_cmsg_addr;
1335 struct target_cmsghdr *target_cmsg;
1336 socklen_t space = 0;
1337
1338 msg_controllen = tswapal(target_msgh->msg_controllen);
1339 if (msg_controllen < sizeof (struct target_cmsghdr))
1340 goto the_end;
1341 target_cmsg_addr = tswapal(target_msgh->msg_control);
1342 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1343 if (!target_cmsg)
1344 return -TARGET_EFAULT;
1345
1346 while (cmsg && target_cmsg) {
1347 void *data = CMSG_DATA(cmsg);
1348 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1349
1350 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1351
1352 space += TARGET_CMSG_SPACE(len);
1353 if (space > msg_controllen) {
1354 space -= TARGET_CMSG_SPACE(len);
1355 gemu_log("Target cmsg overflow\n");
1356 break;
1357 }
1358
1359 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1360 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1361 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1362
1363 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1364 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1365 memcpy(target_data, data, len);
1366 } else {
1367 int *fd = (int *)data;
1368 int *target_fd = (int *)target_data;
1369 int i, numfds = len / sizeof(int);
1370
1371 for (i = 0; i < numfds; i++)
1372 target_fd[i] = tswap32(fd[i]);
1373 }
1374
1375 cmsg = CMSG_NXTHDR(msgh, cmsg);
1376 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1377 }
1378 unlock_user(target_cmsg, target_cmsg_addr, space);
1379 the_end:
1380 target_msgh->msg_controllen = tswapal(space);
1381 return 0;
1382 }
1383
1384 /* do_setsockopt() Must return target values and target errnos. */
1385 static abi_long do_setsockopt(int sockfd, int level, int optname,
1386 abi_ulong optval_addr, socklen_t optlen)
1387 {
1388 abi_long ret;
1389 int val;
1390 struct ip_mreqn *ip_mreq;
1391 struct ip_mreq_source *ip_mreq_source;
1392
1393 switch(level) {
1394 case SOL_TCP:
1395 /* TCP options all take an 'int' value. */
1396 if (optlen < sizeof(uint32_t))
1397 return -TARGET_EINVAL;
1398
1399 if (get_user_u32(val, optval_addr))
1400 return -TARGET_EFAULT;
1401 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1402 break;
1403 case SOL_IP:
1404 switch(optname) {
1405 case IP_TOS:
1406 case IP_TTL:
1407 case IP_HDRINCL:
1408 case IP_ROUTER_ALERT:
1409 case IP_RECVOPTS:
1410 case IP_RETOPTS:
1411 case IP_PKTINFO:
1412 case IP_MTU_DISCOVER:
1413 case IP_RECVERR:
1414 case IP_RECVTOS:
1415 #ifdef IP_FREEBIND
1416 case IP_FREEBIND:
1417 #endif
1418 case IP_MULTICAST_TTL:
1419 case IP_MULTICAST_LOOP:
1420 val = 0;
1421 if (optlen >= sizeof(uint32_t)) {
1422 if (get_user_u32(val, optval_addr))
1423 return -TARGET_EFAULT;
1424 } else if (optlen >= 1) {
1425 if (get_user_u8(val, optval_addr))
1426 return -TARGET_EFAULT;
1427 }
1428 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1429 break;
1430 case IP_ADD_MEMBERSHIP:
1431 case IP_DROP_MEMBERSHIP:
1432 if (optlen < sizeof (struct target_ip_mreq) ||
1433 optlen > sizeof (struct target_ip_mreqn))
1434 return -TARGET_EINVAL;
1435
1436 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1437 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1438 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1439 break;
1440
1441 case IP_BLOCK_SOURCE:
1442 case IP_UNBLOCK_SOURCE:
1443 case IP_ADD_SOURCE_MEMBERSHIP:
1444 case IP_DROP_SOURCE_MEMBERSHIP:
1445 if (optlen != sizeof (struct target_ip_mreq_source))
1446 return -TARGET_EINVAL;
1447
1448 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1449 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1450 unlock_user (ip_mreq_source, optval_addr, 0);
1451 break;
1452
1453 default:
1454 goto unimplemented;
1455 }
1456 break;
1457 case TARGET_SOL_SOCKET:
1458 switch (optname) {
1459 /* Options with 'int' argument. */
1460 case TARGET_SO_DEBUG:
1461 optname = SO_DEBUG;
1462 break;
1463 case TARGET_SO_REUSEADDR:
1464 optname = SO_REUSEADDR;
1465 break;
1466 case TARGET_SO_TYPE:
1467 optname = SO_TYPE;
1468 break;
1469 case TARGET_SO_ERROR:
1470 optname = SO_ERROR;
1471 break;
1472 case TARGET_SO_DONTROUTE:
1473 optname = SO_DONTROUTE;
1474 break;
1475 case TARGET_SO_BROADCAST:
1476 optname = SO_BROADCAST;
1477 break;
1478 case TARGET_SO_SNDBUF:
1479 optname = SO_SNDBUF;
1480 break;
1481 case TARGET_SO_RCVBUF:
1482 optname = SO_RCVBUF;
1483 break;
1484 case TARGET_SO_KEEPALIVE:
1485 optname = SO_KEEPALIVE;
1486 break;
1487 case TARGET_SO_OOBINLINE:
1488 optname = SO_OOBINLINE;
1489 break;
1490 case TARGET_SO_NO_CHECK:
1491 optname = SO_NO_CHECK;
1492 break;
1493 case TARGET_SO_PRIORITY:
1494 optname = SO_PRIORITY;
1495 break;
1496 #ifdef SO_BSDCOMPAT
1497 case TARGET_SO_BSDCOMPAT:
1498 optname = SO_BSDCOMPAT;
1499 break;
1500 #endif
1501 case TARGET_SO_PASSCRED:
1502 optname = SO_PASSCRED;
1503 break;
1504 case TARGET_SO_TIMESTAMP:
1505 optname = SO_TIMESTAMP;
1506 break;
1507 case TARGET_SO_RCVLOWAT:
1508 optname = SO_RCVLOWAT;
1509 break;
1510 case TARGET_SO_RCVTIMEO:
1511 optname = SO_RCVTIMEO;
1512 break;
1513 case TARGET_SO_SNDTIMEO:
1514 optname = SO_SNDTIMEO;
1515         break;
1517 default:
1518 goto unimplemented;
1519 }
1520 if (optlen < sizeof(uint32_t))
1521 return -TARGET_EINVAL;
1522
1523 if (get_user_u32(val, optval_addr))
1524 return -TARGET_EFAULT;
1525 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1526 break;
1527 default:
1528 unimplemented:
1529 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1530 ret = -TARGET_ENOPROTOOPT;
1531 }
1532 return ret;
1533 }
1534
1535 /* do_getsockopt() Must return target values and target errnos. */
1536 static abi_long do_getsockopt(int sockfd, int level, int optname,
1537 abi_ulong optval_addr, abi_ulong optlen)
1538 {
1539 abi_long ret;
1540 int len, val;
1541 socklen_t lv;
1542
1543 switch(level) {
1544 case TARGET_SOL_SOCKET:
1545 level = SOL_SOCKET;
1546 switch (optname) {
1547 /* These don't just return a single integer */
1548 case TARGET_SO_LINGER:
1549 case TARGET_SO_RCVTIMEO:
1550 case TARGET_SO_SNDTIMEO:
1551 case TARGET_SO_PEERNAME:
1552 goto unimplemented;
1553 case TARGET_SO_PEERCRED: {
1554 struct ucred cr;
1555 socklen_t crlen;
1556 struct target_ucred *tcr;
1557
1558 if (get_user_u32(len, optlen)) {
1559 return -TARGET_EFAULT;
1560 }
1561 if (len < 0) {
1562 return -TARGET_EINVAL;
1563 }
1564
1565 crlen = sizeof(cr);
1566 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1567 &cr, &crlen));
1568 if (ret < 0) {
1569 return ret;
1570 }
1571 if (len > crlen) {
1572 len = crlen;
1573 }
1574 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1575 return -TARGET_EFAULT;
1576 }
1577 __put_user(cr.pid, &tcr->pid);
1578 __put_user(cr.uid, &tcr->uid);
1579 __put_user(cr.gid, &tcr->gid);
1580 unlock_user_struct(tcr, optval_addr, 1);
1581 if (put_user_u32(len, optlen)) {
1582 return -TARGET_EFAULT;
1583 }
1584 break;
1585 }
1586 /* Options with 'int' argument. */
1587 case TARGET_SO_DEBUG:
1588 optname = SO_DEBUG;
1589 goto int_case;
1590 case TARGET_SO_REUSEADDR:
1591 optname = SO_REUSEADDR;
1592 goto int_case;
1593 case TARGET_SO_TYPE:
1594 optname = SO_TYPE;
1595 goto int_case;
1596 case TARGET_SO_ERROR:
1597 optname = SO_ERROR;
1598 goto int_case;
1599 case TARGET_SO_DONTROUTE:
1600 optname = SO_DONTROUTE;
1601 goto int_case;
1602 case TARGET_SO_BROADCAST:
1603 optname = SO_BROADCAST;
1604 goto int_case;
1605 case TARGET_SO_SNDBUF:
1606 optname = SO_SNDBUF;
1607 goto int_case;
1608 case TARGET_SO_RCVBUF:
1609 optname = SO_RCVBUF;
1610 goto int_case;
1611 case TARGET_SO_KEEPALIVE:
1612 optname = SO_KEEPALIVE;
1613 goto int_case;
1614 case TARGET_SO_OOBINLINE:
1615 optname = SO_OOBINLINE;
1616 goto int_case;
1617 case TARGET_SO_NO_CHECK:
1618 optname = SO_NO_CHECK;
1619 goto int_case;
1620 case TARGET_SO_PRIORITY:
1621 optname = SO_PRIORITY;
1622 goto int_case;
1623 #ifdef SO_BSDCOMPAT
1624 case TARGET_SO_BSDCOMPAT:
1625 optname = SO_BSDCOMPAT;
1626 goto int_case;
1627 #endif
1628 case TARGET_SO_PASSCRED:
1629 optname = SO_PASSCRED;
1630 goto int_case;
1631 case TARGET_SO_TIMESTAMP:
1632 optname = SO_TIMESTAMP;
1633 goto int_case;
1634 case TARGET_SO_RCVLOWAT:
1635 optname = SO_RCVLOWAT;
1636 goto int_case;
1637 default:
1638 goto int_case;
1639 }
1640 break;
1641 case SOL_TCP:
1642 /* TCP options all take an 'int' value. */
1643 int_case:
1644 if (get_user_u32(len, optlen))
1645 return -TARGET_EFAULT;
1646 if (len < 0)
1647 return -TARGET_EINVAL;
1648 lv = sizeof(lv);
1649 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1650 if (ret < 0)
1651 return ret;
1652 if (len > lv)
1653 len = lv;
1654 if (len == 4) {
1655 if (put_user_u32(val, optval_addr))
1656 return -TARGET_EFAULT;
1657 } else {
1658 if (put_user_u8(val, optval_addr))
1659 return -TARGET_EFAULT;
1660 }
1661 if (put_user_u32(len, optlen))
1662 return -TARGET_EFAULT;
1663 break;
1664 case SOL_IP:
1665 switch(optname) {
1666 case IP_TOS:
1667 case IP_TTL:
1668 case IP_HDRINCL:
1669 case IP_ROUTER_ALERT:
1670 case IP_RECVOPTS:
1671 case IP_RETOPTS:
1672 case IP_PKTINFO:
1673 case IP_MTU_DISCOVER:
1674 case IP_RECVERR:
1675 case IP_RECVTOS:
1676 #ifdef IP_FREEBIND
1677 case IP_FREEBIND:
1678 #endif
1679 case IP_MULTICAST_TTL:
1680 case IP_MULTICAST_LOOP:
1681 if (get_user_u32(len, optlen))
1682 return -TARGET_EFAULT;
1683 if (len < 0)
1684 return -TARGET_EINVAL;
1685 lv = sizeof(lv);
1686 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1687 if (ret < 0)
1688 return ret;
1689 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1690 len = 1;
1691 if (put_user_u32(len, optlen)
1692 || put_user_u8(val, optval_addr))
1693 return -TARGET_EFAULT;
1694 } else {
1695 if (len > sizeof(int))
1696 len = sizeof(int);
1697 if (put_user_u32(len, optlen)
1698 || put_user_u32(val, optval_addr))
1699 return -TARGET_EFAULT;
1700 }
1701 break;
1702 default:
1703 ret = -TARGET_ENOPROTOOPT;
1704 break;
1705 }
1706 break;
1707 default:
1708 unimplemented:
1709 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1710 level, optname);
1711 ret = -TARGET_EOPNOTSUPP;
1712 break;
1713 }
1714 return ret;
1715 }
1716
1717 /* FIXME
1718 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1719 * other lock functions have a return code of 0 for failure.
1720 */
1721 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1722 int count, int copy)
1723 {
1724 struct target_iovec *target_vec;
1725 abi_ulong base;
1726 int i;
1727
1728 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1729 if (!target_vec)
1730 return -TARGET_EFAULT;
1731 for(i = 0;i < count; i++) {
1732 base = tswapal(target_vec[i].iov_base);
1733 vec[i].iov_len = tswapal(target_vec[i].iov_len);
1734 if (vec[i].iov_len != 0) {
1735 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1736 /* Don't check lock_user return value. We must call writev even
1737                if an element has an invalid base address. */
1738 } else {
1739 /* zero length pointer is ignored */
1740 vec[i].iov_base = NULL;
1741 }
1742 }
1743 unlock_user (target_vec, target_addr, 0);
1744 return 0;
1745 }
1746
1747 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1748 int count, int copy)
1749 {
1750 struct target_iovec *target_vec;
1751 abi_ulong base;
1752 int i;
1753
1754 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1755 if (!target_vec)
1756 return -TARGET_EFAULT;
1757 for(i = 0;i < count; i++) {
1758 if (target_vec[i].iov_base) {
1759 base = tswapal(target_vec[i].iov_base);
1760 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1761 }
1762 }
1763 unlock_user (target_vec, target_addr, 0);
1764
1765 return 0;
1766 }
1767
1768 /* do_socket() Must return target values and target errnos. */
1769 static abi_long do_socket(int domain, int type, int protocol)
1770 {
1771 #if defined(TARGET_MIPS)
1772 switch(type) {
1773 case TARGET_SOCK_DGRAM:
1774 type = SOCK_DGRAM;
1775 break;
1776 case TARGET_SOCK_STREAM:
1777 type = SOCK_STREAM;
1778 break;
1779 case TARGET_SOCK_RAW:
1780 type = SOCK_RAW;
1781 break;
1782 case TARGET_SOCK_RDM:
1783 type = SOCK_RDM;
1784 break;
1785 case TARGET_SOCK_SEQPACKET:
1786 type = SOCK_SEQPACKET;
1787 break;
1788 case TARGET_SOCK_PACKET:
1789 type = SOCK_PACKET;
1790 break;
1791 }
1792 #endif
1793 if (domain == PF_NETLINK)
1794         return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1795 return get_errno(socket(domain, type, protocol));
1796 }
1797
1798 /* do_bind() Must return target values and target errnos. */
1799 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1800 socklen_t addrlen)
1801 {
1802 void *addr;
1803 abi_long ret;
1804
1805 if ((int)addrlen < 0) {
1806 return -TARGET_EINVAL;
1807 }
1808
1809 addr = alloca(addrlen+1);
1810
1811 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1812 if (ret)
1813 return ret;
1814
1815 return get_errno(bind(sockfd, addr, addrlen));
1816 }
1817
1818 /* do_connect() Must return target values and target errnos. */
1819 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1820 socklen_t addrlen)
1821 {
1822 void *addr;
1823 abi_long ret;
1824
1825 if ((int)addrlen < 0) {
1826 return -TARGET_EINVAL;
1827 }
1828
1829 addr = alloca(addrlen);
1830
1831 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1832 if (ret)
1833 return ret;
1834
1835 return get_errno(connect(sockfd, addr, addrlen));
1836 }
1837
1838 /* do_sendrecvmsg() Must return target values and target errnos. */
1839 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1840 int flags, int send)
1841 {
1842 abi_long ret, len;
1843 struct target_msghdr *msgp;
1844 struct msghdr msg;
1845 int count;
1846 struct iovec *vec;
1847 abi_ulong target_vec;
1848
1849 /* FIXME */
1850 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1851 msgp,
1852 target_msg,
1853 send ? 1 : 0))
1854 return -TARGET_EFAULT;
1855 if (msgp->msg_name) {
1856 msg.msg_namelen = tswap32(msgp->msg_namelen);
1857 msg.msg_name = alloca(msg.msg_namelen);
1858 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1859 msg.msg_namelen);
1860 if (ret) {
1861 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1862 return ret;
1863 }
1864 } else {
1865 msg.msg_name = NULL;
1866 msg.msg_namelen = 0;
1867 }
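    /* Reserve twice the target's control buffer: host cmsg alignment and
     * header sizes may exceed the target's, so the converted ancillary data
     * can need more room (this rationale is an assumption, not stated in the
     * original source). */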
1868 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1869 msg.msg_control = alloca(msg.msg_controllen);
1870 msg.msg_flags = tswap32(msgp->msg_flags);
1871
1872 count = tswapal(msgp->msg_iovlen);
1873 vec = alloca(count * sizeof(struct iovec));
1874 target_vec = tswapal(msgp->msg_iov);
1875 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1876 msg.msg_iovlen = count;
1877 msg.msg_iov = vec;
1878
1879 if (send) {
1880 ret = target_to_host_cmsg(&msg, msgp);
1881 if (ret == 0)
1882 ret = get_errno(sendmsg(fd, &msg, flags));
1883 } else {
1884 ret = get_errno(recvmsg(fd, &msg, flags));
1885 if (!is_error(ret)) {
1886 len = ret;
1887 ret = host_to_target_cmsg(msgp, &msg);
1888 if (!is_error(ret))
1889 ret = len;
1890 }
1891 }
1892 unlock_iovec(vec, target_vec, count, !send);
1893 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1894 return ret;
1895 }
1896
1897 /* do_accept() Must return target values and target errnos. */
1898 static abi_long do_accept(int fd, abi_ulong target_addr,
1899 abi_ulong target_addrlen_addr)
1900 {
1901 socklen_t addrlen;
1902 void *addr;
1903 abi_long ret;
1904
1905 if (target_addr == 0)
1906 return get_errno(accept(fd, NULL, NULL));
1907
1908 /* linux returns EINVAL if addrlen pointer is invalid */
1909 if (get_user_u32(addrlen, target_addrlen_addr))
1910 return -TARGET_EINVAL;
1911
1912 if ((int)addrlen < 0) {
1913 return -TARGET_EINVAL;
1914 }
1915
1916 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1917 return -TARGET_EINVAL;
1918
1919 addr = alloca(addrlen);
1920
1921 ret = get_errno(accept(fd, addr, &addrlen));
1922 if (!is_error(ret)) {
1923 host_to_target_sockaddr(target_addr, addr, addrlen);
1924 if (put_user_u32(addrlen, target_addrlen_addr))
1925 ret = -TARGET_EFAULT;
1926 }
1927 return ret;
1928 }
1929
1930 /* do_getpeername() Must return target values and target errnos. */
1931 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1932 abi_ulong target_addrlen_addr)
1933 {
1934 socklen_t addrlen;
1935 void *addr;
1936 abi_long ret;
1937
1938 if (get_user_u32(addrlen, target_addrlen_addr))
1939 return -TARGET_EFAULT;
1940
1941 if ((int)addrlen < 0) {
1942 return -TARGET_EINVAL;
1943 }
1944
1945 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1946 return -TARGET_EFAULT;
1947
1948 addr = alloca(addrlen);
1949
1950 ret = get_errno(getpeername(fd, addr, &addrlen));
1951 if (!is_error(ret)) {
1952 host_to_target_sockaddr(target_addr, addr, addrlen);
1953 if (put_user_u32(addrlen, target_addrlen_addr))
1954 ret = -TARGET_EFAULT;
1955 }
1956 return ret;
1957 }
1958
1959 /* do_getsockname() Must return target values and target errnos. */
1960 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1961 abi_ulong target_addrlen_addr)
1962 {
1963 socklen_t addrlen;
1964 void *addr;
1965 abi_long ret;
1966
1967 if (get_user_u32(addrlen, target_addrlen_addr))
1968 return -TARGET_EFAULT;
1969
1970 if ((int)addrlen < 0) {
1971 return -TARGET_EINVAL;
1972 }
1973
1974 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1975 return -TARGET_EFAULT;
1976
1977 addr = alloca(addrlen);
1978
1979 ret = get_errno(getsockname(fd, addr, &addrlen));
1980 if (!is_error(ret)) {
1981 host_to_target_sockaddr(target_addr, addr, addrlen);
1982 if (put_user_u32(addrlen, target_addrlen_addr))
1983 ret = -TARGET_EFAULT;
1984 }
1985 return ret;
1986 }
1987
1988 /* do_socketpair() Must return target values and target errnos. */
1989 static abi_long do_socketpair(int domain, int type, int protocol,
1990 abi_ulong target_tab_addr)
1991 {
1992 int tab[2];
1993 abi_long ret;
1994
1995 ret = get_errno(socketpair(domain, type, protocol, tab));
1996 if (!is_error(ret)) {
1997 if (put_user_s32(tab[0], target_tab_addr)
1998 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1999 ret = -TARGET_EFAULT;
2000 }
2001 return ret;
2002 }
2003
2004 /* do_sendto() Must return target values and target errnos. */
2005 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2006 abi_ulong target_addr, socklen_t addrlen)
2007 {
2008 void *addr;
2009 void *host_msg;
2010 abi_long ret;
2011
2012 if ((int)addrlen < 0) {
2013 return -TARGET_EINVAL;
2014 }
2015
2016 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2017 if (!host_msg)
2018 return -TARGET_EFAULT;
2019 if (target_addr) {
2020 addr = alloca(addrlen);
2021 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2022 if (ret) {
2023 unlock_user(host_msg, msg, 0);
2024 return ret;
2025 }
2026 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2027 } else {
2028 ret = get_errno(send(fd, host_msg, len, flags));
2029 }
2030 unlock_user(host_msg, msg, 0);
2031 return ret;
2032 }
2033
2034 /* do_recvfrom() Must return target values and target errnos. */
2035 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2036 abi_ulong target_addr,
2037 abi_ulong target_addrlen)
2038 {
2039 socklen_t addrlen;
2040 void *addr;
2041 void *host_msg;
2042 abi_long ret;
2043
2044 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2045 if (!host_msg)
2046 return -TARGET_EFAULT;
2047 if (target_addr) {
2048 if (get_user_u32(addrlen, target_addrlen)) {
2049 ret = -TARGET_EFAULT;
2050 goto fail;
2051 }
2052 if ((int)addrlen < 0) {
2053 ret = -TARGET_EINVAL;
2054 goto fail;
2055 }
2056 addr = alloca(addrlen);
2057 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2058 } else {
2059 addr = NULL; /* To keep compiler quiet. */
2060 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2061 }
2062 if (!is_error(ret)) {
2063 if (target_addr) {
2064 host_to_target_sockaddr(target_addr, addr, addrlen);
2065 if (put_user_u32(addrlen, target_addrlen)) {
2066 ret = -TARGET_EFAULT;
2067 goto fail;
2068 }
2069 }
2070 unlock_user(host_msg, msg, len);
2071 } else {
2072 fail:
2073 unlock_user(host_msg, msg, 0);
2074 }
2075 return ret;
2076 }
2077
2078 #ifdef TARGET_NR_socketcall
2079 /* do_socketcall() Must return target values and target errnos. */
2080 static abi_long do_socketcall(int num, abi_ulong vptr)
2081 {
2082 abi_long ret;
2083 const int n = sizeof(abi_ulong);
2084
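/* Illustrative example (not taken from this file): a guest without
 * separate socket syscalls issues socketcall(SOCKOP_connect, args),
 * where args points to consecutive abi_ulong words {sockfd, addr,
 * addrlen}; each case below fetches its words with
 * get_user_ual(x, vptr + i * n). */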
2085 switch(num) {
2086 case SOCKOP_socket:
2087 {
2088 abi_ulong domain, type, protocol;
2089
2090 if (get_user_ual(domain, vptr)
2091 || get_user_ual(type, vptr + n)
2092 || get_user_ual(protocol, vptr + 2 * n))
2093 return -TARGET_EFAULT;
2094
2095 ret = do_socket(domain, type, protocol);
2096 }
2097 break;
2098 case SOCKOP_bind:
2099 {
2100 abi_ulong sockfd;
2101 abi_ulong target_addr;
2102 socklen_t addrlen;
2103
2104 if (get_user_ual(sockfd, vptr)
2105 || get_user_ual(target_addr, vptr + n)
2106 || get_user_ual(addrlen, vptr + 2 * n))
2107 return -TARGET_EFAULT;
2108
2109 ret = do_bind(sockfd, target_addr, addrlen);
2110 }
2111 break;
2112 case SOCKOP_connect:
2113 {
2114 abi_ulong sockfd;
2115 abi_ulong target_addr;
2116 socklen_t addrlen;
2117
2118 if (get_user_ual(sockfd, vptr)
2119 || get_user_ual(target_addr, vptr + n)
2120 || get_user_ual(addrlen, vptr + 2 * n))
2121 return -TARGET_EFAULT;
2122
2123 ret = do_connect(sockfd, target_addr, addrlen);
2124 }
2125 break;
2126 case SOCKOP_listen:
2127 {
2128 abi_ulong sockfd, backlog;
2129
2130 if (get_user_ual(sockfd, vptr)
2131 || get_user_ual(backlog, vptr + n))
2132 return -TARGET_EFAULT;
2133
2134 ret = get_errno(listen(sockfd, backlog));
2135 }
2136 break;
2137 case SOCKOP_accept:
2138 {
2139 abi_ulong sockfd;
2140 abi_ulong target_addr, target_addrlen;
2141
2142 if (get_user_ual(sockfd, vptr)
2143 || get_user_ual(target_addr, vptr + n)
2144 || get_user_ual(target_addrlen, vptr + 2 * n))
2145 return -TARGET_EFAULT;
2146
2147 ret = do_accept(sockfd, target_addr, target_addrlen);
2148 }
2149 break;
2150 case SOCKOP_getsockname:
2151 {
2152 abi_ulong sockfd;
2153 abi_ulong target_addr, target_addrlen;
2154
2155 if (get_user_ual(sockfd, vptr)
2156 || get_user_ual(target_addr, vptr + n)
2157 || get_user_ual(target_addrlen, vptr + 2 * n))
2158 return -TARGET_EFAULT;
2159
2160 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2161 }
2162 break;
2163 case SOCKOP_getpeername:
2164 {
2165 abi_ulong sockfd;
2166 abi_ulong target_addr, target_addrlen;
2167
2168 if (get_user_ual(sockfd, vptr)
2169 || get_user_ual(target_addr, vptr + n)
2170 || get_user_ual(target_addrlen, vptr + 2 * n))
2171 return -TARGET_EFAULT;
2172
2173 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2174 }
2175 break;
2176 case SOCKOP_socketpair:
2177 {
2178 abi_ulong domain, type, protocol;
2179 abi_ulong tab;
2180
2181 if (get_user_ual(domain, vptr)
2182 || get_user_ual(type, vptr + n)
2183 || get_user_ual(protocol, vptr + 2 * n)
2184 || get_user_ual(tab, vptr + 3 * n))
2185 return -TARGET_EFAULT;
2186
2187 ret = do_socketpair(domain, type, protocol, tab);
2188 }
2189 break;
2190 case SOCKOP_send:
2191 {
2192 abi_ulong sockfd;
2193 abi_ulong msg;
2194 size_t len;
2195 abi_ulong flags;
2196
2197 if (get_user_ual(sockfd, vptr)
2198 || get_user_ual(msg, vptr + n)
2199 || get_user_ual(len, vptr + 2 * n)
2200 || get_user_ual(flags, vptr + 3 * n))
2201 return -TARGET_EFAULT;
2202
2203 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2204 }
2205 break;
2206 case SOCKOP_recv:
2207 {
2208 abi_ulong sockfd;
2209 abi_ulong msg;
2210 size_t len;
2211 abi_ulong flags;
2212
2213 if (get_user_ual(sockfd, vptr)
2214 || get_user_ual(msg, vptr + n)
2215 || get_user_ual(len, vptr + 2 * n)
2216 || get_user_ual(flags, vptr + 3 * n))
2217 return -TARGET_EFAULT;
2218
2219 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2220 }
2221 break;
2222 case SOCKOP_sendto:
2223 {
2224 abi_ulong sockfd;
2225 abi_ulong msg;
2226 size_t len;
2227 abi_ulong flags;
2228 abi_ulong addr;
2229 socklen_t addrlen;
2230
2231 if (get_user_ual(sockfd, vptr)
2232 || get_user_ual(msg, vptr + n)
2233 || get_user_ual(len, vptr + 2 * n)
2234 || get_user_ual(flags, vptr + 3 * n)
2235 || get_user_ual(addr, vptr + 4 * n)
2236 || get_user_ual(addrlen, vptr + 5 * n))
2237 return -TARGET_EFAULT;
2238
2239 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2240 }
2241 break;
2242 case SOCKOP_recvfrom:
2243 {
2244 abi_ulong sockfd;
2245 abi_ulong msg;
2246 size_t len;
2247 abi_ulong flags;
2248 abi_ulong addr;
2249 socklen_t addrlen;
2250
2251 if (get_user_ual(sockfd, vptr)
2252 || get_user_ual(msg, vptr + n)
2253 || get_user_ual(len, vptr + 2 * n)
2254 || get_user_ual(flags, vptr + 3 * n)
2255 || get_user_ual(addr, vptr + 4 * n)
2256 || get_user_ual(addrlen, vptr + 5 * n))
2257 return -TARGET_EFAULT;
2258
2259 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2260 }
2261 break;
2262 case SOCKOP_shutdown:
2263 {
2264 abi_ulong sockfd, how;
2265
2266 if (get_user_ual(sockfd, vptr)
2267 || get_user_ual(how, vptr + n))
2268 return -TARGET_EFAULT;
2269
2270 ret = get_errno(shutdown(sockfd, how));
2271 }
2272 break;
2273 case SOCKOP_sendmsg:
2274 case SOCKOP_recvmsg:
2275 {
2276 abi_ulong fd;
2277 abi_ulong target_msg;
2278 abi_ulong flags;
2279
2280 if (get_user_ual(fd, vptr)
2281 || get_user_ual(target_msg, vptr + n)
2282 || get_user_ual(flags, vptr + 2 * n))
2283 return -TARGET_EFAULT;
2284
2285 ret = do_sendrecvmsg(fd, target_msg, flags,
2286 (num == SOCKOP_sendmsg));
2287 }
2288 break;
2289 case SOCKOP_setsockopt:
2290 {
2291 abi_ulong sockfd;
2292 abi_ulong level;
2293 abi_ulong optname;
2294 abi_ulong optval;
2295 socklen_t optlen;
2296
2297 if (get_user_ual(sockfd, vptr)
2298 || get_user_ual(level, vptr + n)
2299 || get_user_ual(optname, vptr + 2 * n)
2300 || get_user_ual(optval, vptr + 3 * n)
2301 || get_user_ual(optlen, vptr + 4 * n))
2302 return -TARGET_EFAULT;
2303
2304 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2305 }
2306 break;
2307 case SOCKOP_getsockopt:
2308 {
2309 abi_ulong sockfd;
2310 abi_ulong level;
2311 abi_ulong optname;
2312 abi_ulong optval;
2313 socklen_t optlen;
2314
2315 if (get_user_ual(sockfd, vptr)
2316 || get_user_ual(level, vptr + n)
2317 || get_user_ual(optname, vptr + 2 * n)
2318 || get_user_ual(optval, vptr + 3 * n)
2319 || get_user_ual(optlen, vptr + 4 * n))
2320 return -TARGET_EFAULT;
2321
2322 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2323 }
2324 break;
2325 default:
2326 gemu_log("Unsupported socketcall: %d\n", num);
2327 ret = -TARGET_ENOSYS;
2328 break;
2329 }
2330 return ret;
2331 }
2332 #endif
2333
2334 #define N_SHM_REGIONS 32
2335
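/* Bookkeeping for SysV shared memory attachments: do_shmat() records the
 * guest address and size of each attached segment in shm_regions[] below
 * (a zero start marks a free slot) so that do_shmdt() can later find the
 * segment and clear its guest page flags. */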
2336 static struct shm_region {
2337 abi_ulong start;
2338 abi_ulong size;
2339 } shm_regions[N_SHM_REGIONS];
2340
2341 struct target_ipc_perm
2342 {
2343 abi_long __key;
2344 abi_ulong uid;
2345 abi_ulong gid;
2346 abi_ulong cuid;
2347 abi_ulong cgid;
2348 unsigned short int mode;
2349 unsigned short int __pad1;
2350 unsigned short int __seq;
2351 unsigned short int __pad2;
2352 abi_ulong __unused1;
2353 abi_ulong __unused2;
2354 };
2355
2356 struct target_semid_ds
2357 {
2358 struct target_ipc_perm sem_perm;
2359 abi_ulong sem_otime;
2360 abi_ulong __unused1;
2361 abi_ulong sem_ctime;
2362 abi_ulong __unused2;
2363 abi_ulong sem_nsems;
2364 abi_ulong __unused3;
2365 abi_ulong __unused4;
2366 };
2367
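/* The IPC structure converters below all follow the same pattern: lock
 * the guest structure with lock_user_struct(), copy each field with the
 * appropriate byte swap (tswapal()/tswap16() or __get_user()/__put_user()),
 * then unlock with the dirty length set only for guest-visible writes. */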
2368 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2369 abi_ulong target_addr)
2370 {
2371 struct target_ipc_perm *target_ip;
2372 struct target_semid_ds *target_sd;
2373
2374 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2375 return -TARGET_EFAULT;
2376 target_ip = &(target_sd->sem_perm);
2377 host_ip->__key = tswapal(target_ip->__key);
2378 host_ip->uid = tswapal(target_ip->uid);
2379 host_ip->gid = tswapal(target_ip->gid);
2380 host_ip->cuid = tswapal(target_ip->cuid);
2381 host_ip->cgid = tswapal(target_ip->cgid);
2382 host_ip->mode = tswap16(target_ip->mode);
2383 unlock_user_struct(target_sd, target_addr, 0);
2384 return 0;
2385 }
2386
2387 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2388 struct ipc_perm *host_ip)
2389 {
2390 struct target_ipc_perm *target_ip;
2391 struct target_semid_ds *target_sd;
2392
2393 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2394 return -TARGET_EFAULT;
2395 target_ip = &(target_sd->sem_perm);
2396 target_ip->__key = tswapal(host_ip->__key);
2397 target_ip->uid = tswapal(host_ip->uid);
2398 target_ip->gid = tswapal(host_ip->gid);
2399 target_ip->cuid = tswapal(host_ip->cuid);
2400 target_ip->cgid = tswapal(host_ip->cgid);
2401 target_ip->mode = tswap16(host_ip->mode);
2402 unlock_user_struct(target_sd, target_addr, 1);
2403 return 0;
2404 }
2405
2406 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2407 abi_ulong target_addr)
2408 {
2409 struct target_semid_ds *target_sd;
2410
2411 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2412 return -TARGET_EFAULT;
2413 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2414 return -TARGET_EFAULT;
2415 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2416 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2417 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2418 unlock_user_struct(target_sd, target_addr, 0);
2419 return 0;
2420 }
2421
2422 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2423 struct semid_ds *host_sd)
2424 {
2425 struct target_semid_ds *target_sd;
2426
2427 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2428 return -TARGET_EFAULT;
2429 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2430 return -TARGET_EFAULT;
2431 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2432 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2433 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2434 unlock_user_struct(target_sd, target_addr, 1);
2435 return 0;
2436 }
2437
2438 struct target_seminfo {
2439 int semmap;
2440 int semmni;
2441 int semmns;
2442 int semmnu;
2443 int semmsl;
2444 int semopm;
2445 int semume;
2446 int semusz;
2447 int semvmx;
2448 int semaem;
2449 };
2450
2451 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2452 struct seminfo *host_seminfo)
2453 {
2454 struct target_seminfo *target_seminfo;
2455 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2456 return -TARGET_EFAULT;
2457 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2458 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2459 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2460 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2461 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2462 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2463 __put_user(host_seminfo->semume, &target_seminfo->semume);
2464 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2465 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2466 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2467 unlock_user_struct(target_seminfo, target_addr, 1);
2468 return 0;
2469 }
2470
2471 union semun {
2472 int val;
2473 struct semid_ds *buf;
2474 unsigned short *array;
2475 struct seminfo *__buf;
2476 };
2477
2478 union target_semun {
2479 int val;
2480 abi_ulong buf;
2481 abi_ulong array;
2482 abi_ulong __buf;
2483 };
2484
2485 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2486 abi_ulong target_addr)
2487 {
2488 int nsems;
2489 unsigned short *array;
2490 union semun semun;
2491 struct semid_ds semid_ds;
2492 int i, ret;
2493
2494 semun.buf = &semid_ds;
2495
2496 ret = semctl(semid, 0, IPC_STAT, semun);
2497 if (ret == -1)
2498 return get_errno(ret);
2499
2500 nsems = semid_ds.sem_nsems;
2501
2502 *host_array = malloc(nsems*sizeof(unsigned short));
2503 array = lock_user(VERIFY_READ, target_addr,
2504 nsems*sizeof(unsigned short), 1);
2505 if (!array)
2506 return -TARGET_EFAULT;
2507
2508 for(i=0; i<nsems; i++) {
2509 __get_user((*host_array)[i], &array[i]);
2510 }
2511 unlock_user(array, target_addr, 0);
2512
2513 return 0;
2514 }
2515
2516 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2517 unsigned short **host_array)
2518 {
2519 int nsems;
2520 unsigned short *array;
2521 union semun semun;
2522 struct semid_ds semid_ds;
2523 int i, ret;
2524
2525 semun.buf = &semid_ds;
2526
2527 ret = semctl(semid, 0, IPC_STAT, semun);
2528 if (ret == -1)
2529 return get_errno(ret);
2530
2531 nsems = semid_ds.sem_nsems;
2532
2533 array = lock_user(VERIFY_WRITE, target_addr,
2534 nsems*sizeof(unsigned short), 0);
2535 if (!array)
2536 return -TARGET_EFAULT;
2537
2538 for(i=0; i<nsems; i++) {
2539 __put_user((*host_array)[i], &array[i]);
2540 }
2541 free(*host_array);
2542 unlock_user(array, target_addr, 1);
2543
2544 return 0;
2545 }
2546
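/* union target_semun carries guest pointers as abi_ulong values; each
 * command below converts only the member it actually uses (val, array,
 * buf or __buf) into the host union semun before and, where needed,
 * after the host semctl() call. */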
2547 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2548 union target_semun target_su)
2549 {
2550 union semun arg;
2551 struct semid_ds dsarg;
2552 unsigned short *array = NULL;
2553 struct seminfo seminfo;
2554 abi_long ret = -TARGET_EINVAL;
2555 abi_long err;
2556 cmd &= 0xff;
2557
2558 switch( cmd ) {
2559 case GETVAL:
2560 case SETVAL:
2561 arg.val = tswap32(target_su.val);
2562 ret = get_errno(semctl(semid, semnum, cmd, arg));
2563 target_su.val = tswap32(arg.val);
2564 break;
2565 case GETALL:
2566 case SETALL:
2567 err = target_to_host_semarray(semid, &array, target_su.array);
2568 if (err)
2569 return err;
2570 arg.array = array;
2571 ret = get_errno(semctl(semid, semnum, cmd, arg));
2572 err = host_to_target_semarray(semid, target_su.array, &array);
2573 if (err)
2574 return err;
2575 break;
2576 case IPC_STAT:
2577 case IPC_SET:
2578 case SEM_STAT:
2579 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2580 if (err)
2581 return err;
2582 arg.buf = &dsarg;
2583 ret = get_errno(semctl(semid, semnum, cmd, arg));
2584 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2585 if (err)
2586 return err;
2587 break;
2588 case IPC_INFO:
2589 case SEM_INFO:
2590 arg.__buf = &seminfo;
2591 ret = get_errno(semctl(semid, semnum, cmd, arg));
2592 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2593 if (err)
2594 return err;
2595 break;
2596 case IPC_RMID:
2597 case GETPID:
2598 case GETNCNT:
2599 case GETZCNT:
2600 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2601 break;
2602 }
2603
2604 return ret;
2605 }
2606
2607 struct target_sembuf {
2608 unsigned short sem_num;
2609 short sem_op;
2610 short sem_flg;
2611 };
2612
2613 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2614 abi_ulong target_addr,
2615 unsigned nsops)
2616 {
2617 struct target_sembuf *target_sembuf;
2618 int i;
2619
2620 target_sembuf = lock_user(VERIFY_READ, target_addr,
2621 nsops*sizeof(struct target_sembuf), 1);
2622 if (!target_sembuf)
2623 return -TARGET_EFAULT;
2624
2625 for(i=0; i<nsops; i++) {
2626 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2627 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2628 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2629 }
2630
2631 unlock_user(target_sembuf, target_addr, 0);
2632
2633 return 0;
2634 }
2635
2636 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2637 {
2638 struct sembuf sops[nsops];
2639
2640 if (target_to_host_sembuf(sops, ptr, nsops))
2641 return -TARGET_EFAULT;
2642
2643 return semop(semid, sops, nsops);
2644 }
2645
2646 struct target_msqid_ds
2647 {
2648 struct target_ipc_perm msg_perm;
2649 abi_ulong msg_stime;
2650 #if TARGET_ABI_BITS == 32
2651 abi_ulong __unused1;
2652 #endif
2653 abi_ulong msg_rtime;
2654 #if TARGET_ABI_BITS == 32
2655 abi_ulong __unused2;
2656 #endif
2657 abi_ulong msg_ctime;
2658 #if TARGET_ABI_BITS == 32
2659 abi_ulong __unused3;
2660 #endif
2661 abi_ulong __msg_cbytes;
2662 abi_ulong msg_qnum;
2663 abi_ulong msg_qbytes;
2664 abi_ulong msg_lspid;
2665 abi_ulong msg_lrpid;
2666 abi_ulong __unused4;
2667 abi_ulong __unused5;
2668 };
2669
2670 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2671 abi_ulong target_addr)
2672 {
2673 struct target_msqid_ds *target_md;
2674
2675 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2676 return -TARGET_EFAULT;
2677 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2678 return -TARGET_EFAULT;
2679 host_md->msg_stime = tswapal(target_md->msg_stime);
2680 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2681 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2682 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2683 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2684 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2685 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2686 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2687 unlock_user_struct(target_md, target_addr, 0);
2688 return 0;
2689 }
2690
2691 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2692 struct msqid_ds *host_md)
2693 {
2694 struct target_msqid_ds *target_md;
2695
2696 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2697 return -TARGET_EFAULT;
2698 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2699 return -TARGET_EFAULT;
2700 target_md->msg_stime = tswapal(host_md->msg_stime);
2701 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2702 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2703 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2704 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2705 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2706 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2707 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2708 unlock_user_struct(target_md, target_addr, 1);
2709 return 0;
2710 }
2711
2712 struct target_msginfo {
2713 int msgpool;
2714 int msgmap;
2715 int msgmax;
2716 int msgmnb;
2717 int msgmni;
2718 int msgssz;
2719 int msgtql;
2720 unsigned short int msgseg;
2721 };
2722
2723 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2724 struct msginfo *host_msginfo)
2725 {
2726 struct target_msginfo *target_msginfo;
2727 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2728 return -TARGET_EFAULT;
2729 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2730 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2731 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2732 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2733 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2734 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2735 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2736 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2737 unlock_user_struct(target_msginfo, target_addr, 1);
2738 return 0;
2739 }
2740
2741 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2742 {
2743 struct msqid_ds dsarg;
2744 struct msginfo msginfo;
2745 abi_long ret = -TARGET_EINVAL;
2746
2747 cmd &= 0xff;
2748
2749 switch (cmd) {
2750 case IPC_STAT:
2751 case IPC_SET:
2752 case MSG_STAT:
2753 if (target_to_host_msqid_ds(&dsarg,ptr))
2754 return -TARGET_EFAULT;
2755 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2756 if (host_to_target_msqid_ds(ptr,&dsarg))
2757 return -TARGET_EFAULT;
2758 break;
2759 case IPC_RMID:
2760 ret = get_errno(msgctl(msgid, cmd, NULL));
2761 break;
2762 case IPC_INFO:
2763 case MSG_INFO:
2764 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2765 if (host_to_target_msginfo(ptr, &msginfo))
2766 return -TARGET_EFAULT;
2767 break;
2768 }
2769
2770 return ret;
2771 }
2772
2773 struct target_msgbuf {
2774 abi_long mtype;
2775 char mtext[1];
2776 };
2777
2778 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2779 unsigned int msgsz, int msgflg)
2780 {
2781 struct target_msgbuf *target_mb;
2782 struct msgbuf *host_mb;
2783 abi_long ret = 0;
2784
2785 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2786 return -TARGET_EFAULT;
2787 host_mb = malloc(msgsz+sizeof(long));
2788 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2789 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2790 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2791 free(host_mb);
2792 unlock_user_struct(target_mb, msgp, 0);
2793
2794 return ret;
2795 }
2796
2797 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2798 unsigned int msgsz, abi_long msgtyp,
2799 int msgflg)
2800 {
2801 struct target_msgbuf *target_mb;
2802 char *target_mtext;
2803 struct msgbuf *host_mb;
2804 abi_long ret = 0;
2805
2806 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2807 return -TARGET_EFAULT;
2808
2809 host_mb = malloc(msgsz+sizeof(long));
2810 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
2811
2812 if (ret > 0) {
2813 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2814 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2815 if (!target_mtext) {
2816 ret = -TARGET_EFAULT;
2817 goto end;
2818 }
2819 memcpy(target_mb->mtext, host_mb->mtext, ret);
2820 unlock_user(target_mtext, target_mtext_addr, ret);
2821 }
2822
2823 target_mb->mtype = tswapal(host_mb->mtype);
2824 free(host_mb);
2825
2826 end:
2827 if (target_mb)
2828 unlock_user_struct(target_mb, msgp, 1);
2829 return ret;
2830 }
2831
2832 struct target_shmid_ds
2833 {
2834 struct target_ipc_perm shm_perm;
2835 abi_ulong shm_segsz;
2836 abi_ulong shm_atime;
2837 #if TARGET_ABI_BITS == 32
2838 abi_ulong __unused1;
2839 #endif
2840 abi_ulong shm_dtime;
2841 #if TARGET_ABI_BITS == 32
2842 abi_ulong __unused2;
2843 #endif
2844 abi_ulong shm_ctime;
2845 #if TARGET_ABI_BITS == 32
2846 abi_ulong __unused3;
2847 #endif
2848 int shm_cpid;
2849 int shm_lpid;
2850 abi_ulong shm_nattch;
2851 unsigned long int __unused4;
2852 unsigned long int __unused5;
2853 };
2854
2855 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2856 abi_ulong target_addr)
2857 {
2858 struct target_shmid_ds *target_sd;
2859
2860 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2861 return -TARGET_EFAULT;
2862 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2863 return -TARGET_EFAULT;
2864 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2865 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2866 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2867 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2868 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2869 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2870 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2871 unlock_user_struct(target_sd, target_addr, 0);
2872 return 0;
2873 }
2874
2875 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2876 struct shmid_ds *host_sd)
2877 {
2878 struct target_shmid_ds *target_sd;
2879
2880 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2881 return -TARGET_EFAULT;
2882 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2883 return -TARGET_EFAULT;
2884 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2885 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2886 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2887 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2888 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2889 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2890 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2891 unlock_user_struct(target_sd, target_addr, 1);
2892 return 0;
2893 }
2894
2895 struct target_shminfo {
2896 abi_ulong shmmax;
2897 abi_ulong shmmin;
2898 abi_ulong shmmni;
2899 abi_ulong shmseg;
2900 abi_ulong shmall;
2901 };
2902
2903 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2904 struct shminfo *host_shminfo)
2905 {
2906 struct target_shminfo *target_shminfo;
2907 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2908 return -TARGET_EFAULT;
2909 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2910 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2911 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2912 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2913 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2914 unlock_user_struct(target_shminfo, target_addr, 1);
2915 return 0;
2916 }
2917
2918 struct target_shm_info {
2919 int used_ids;
2920 abi_ulong shm_tot;
2921 abi_ulong shm_rss;
2922 abi_ulong shm_swp;
2923 abi_ulong swap_attempts;
2924 abi_ulong swap_successes;
2925 };
2926
2927 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2928 struct shm_info *host_shm_info)
2929 {
2930 struct target_shm_info *target_shm_info;
2931 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2932 return -TARGET_EFAULT;
2933 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2934 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2935 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2936 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2937 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2938 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2939 unlock_user_struct(target_shm_info, target_addr, 1);
2940 return 0;
2941 }
2942
2943 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2944 {
2945 struct shmid_ds dsarg;
2946 struct shminfo shminfo;
2947 struct shm_info shm_info;
2948 abi_long ret = -TARGET_EINVAL;
2949
2950 cmd &= 0xff;
2951
2952 switch(cmd) {
2953 case IPC_STAT:
2954 case IPC_SET:
2955 case SHM_STAT:
2956 if (target_to_host_shmid_ds(&dsarg, buf))
2957 return -TARGET_EFAULT;
2958 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2959 if (host_to_target_shmid_ds(buf, &dsarg))
2960 return -TARGET_EFAULT;
2961 break;
2962 case IPC_INFO:
2963 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2964 if (host_to_target_shminfo(buf, &shminfo))
2965 return -TARGET_EFAULT;
2966 break;
2967 case SHM_INFO:
2968 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2969 if (host_to_target_shm_info(buf, &shm_info))
2970 return -TARGET_EFAULT;
2971 break;
2972 case IPC_RMID:
2973 case SHM_LOCK:
2974 case SHM_UNLOCK:
2975 ret = get_errno(shmctl(shmid, cmd, NULL));
2976 break;
2977 }
2978
2979 return ret;
2980 }
2981
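/* When the guest passes no address, a free guest virtual address range
 * is reserved with mmap_find_vma() and the segment is attached there
 * with SHM_REMAP; the new mapping is then recorded in shm_regions[] and
 * its guest page flags updated (read-only if SHM_RDONLY was given). */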
2982 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2983 {
2984 abi_long raddr;
2985 void *host_raddr;
2986 struct shmid_ds shm_info;
2987 int i,ret;
2988
2989 /* find out the length of the shared memory segment */
2990 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2991 if (is_error(ret)) {
2992 /* can't get length, bail out */
2993 return ret;
2994 }
2995
2996 mmap_lock();
2997
2998 if (shmaddr)
2999 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3000 else {
3001 abi_ulong mmap_start;
3002
3003 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3004
3005 if (mmap_start == -1) {
3006 errno = ENOMEM;
3007 host_raddr = (void *)-1;
3008 } else
3009 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3010 }
3011
3012 if (host_raddr == (void *)-1) {
3013 mmap_unlock();
3014 return get_errno((long)host_raddr);
3015 }
3016 raddr=h2g((unsigned long)host_raddr);
3017
3018 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3019 PAGE_VALID | PAGE_READ |
3020 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3021
3022 for (i = 0; i < N_SHM_REGIONS; i++) {
3023 if (shm_regions[i].start == 0) {
3024 shm_regions[i].start = raddr;
3025 shm_regions[i].size = shm_info.shm_segsz;
3026 break;
3027 }
3028 }
3029
3030 mmap_unlock();
3031 return raddr;
3032
3033 }
3034
3035 static inline abi_long do_shmdt(abi_ulong shmaddr)
3036 {
3037 int i;
3038
3039 for (i = 0; i < N_SHM_REGIONS; ++i) {
3040 if (shm_regions[i].start == shmaddr) {
3041 shm_regions[i].start = 0;
3042 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3043 break;
3044 }
3045 }
3046
3047 return get_errno(shmdt(g2h(shmaddr)));
3048 }
3049
3050 #ifdef TARGET_NR_ipc
3051 /* ??? This only works with linear mappings. */
3052 /* do_ipc() must return target values and target errnos. */
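/* The single ipc() entry point multiplexes all SysV IPC operations: the
 * upper 16 bits of 'call' carry an interface version and the lower 16
 * bits the IPCOP_* operation, which is dispatched below. */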
3053 static abi_long do_ipc(unsigned int call, int first,
3054 int second, int third,
3055 abi_long ptr, abi_long fifth)
3056 {
3057 int version;
3058 abi_long ret = 0;
3059
3060 version = call >> 16;
3061 call &= 0xffff;
3062
3063 switch (call) {
3064 case IPCOP_semop:
3065 ret = do_semop(first, ptr, second);
3066 break;
3067
3068 case IPCOP_semget:
3069 ret = get_errno(semget(first, second, third));
3070 break;
3071
3072 case IPCOP_semctl:
3073 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3074 break;
3075
3076 case IPCOP_msgget:
3077 ret = get_errno(msgget(first, second));
3078 break;
3079
3080 case IPCOP_msgsnd:
3081 ret = do_msgsnd(first, ptr, second, third);
3082 break;
3083
3084 case IPCOP_msgctl:
3085 ret = do_msgctl(first, second, ptr);
3086 break;
3087
3088 case IPCOP_msgrcv:
3089 switch (version) {
3090 case 0:
3091 {
3092 struct target_ipc_kludge {
3093 abi_long msgp;
3094 abi_long msgtyp;
3095 } *tmp;
3096
3097 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3098 ret = -TARGET_EFAULT;
3099 break;
3100 }
3101
3102 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3103
3104 unlock_user_struct(tmp, ptr, 0);
3105 break;
3106 }
3107 default:
3108 ret = do_msgrcv(first, ptr, second, fifth, third);
3109 }
3110 break;
3111
3112 case IPCOP_shmat:
3113 switch (version) {
3114 default:
3115 {
3116 abi_ulong raddr;
3117 raddr = do_shmat(first, ptr, second);
3118 if (is_error(raddr))
3119 return get_errno(raddr);
3120 if (put_user_ual(raddr, third))
3121 return -TARGET_EFAULT;
3122 break;
3123 }
3124 case 1:
3125 ret = -TARGET_EINVAL;
3126 break;
3127 }
3128 break;
3129 case IPCOP_shmdt:
3130 ret = do_shmdt(ptr);
3131 break;
3132
3133 case IPCOP_shmget:
3134 /* IPC_* flag values are the same on all linux platforms */
3135 ret = get_errno(shmget(first, second, third));
3136 break;
3137
3138 /* IPC_* and SHM_* command values are the same on all linux platforms */
3139 case IPCOP_shmctl:
3140 ret = do_shmctl(first, second, third);
3141 break;
3142 default:
3143 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3144 ret = -TARGET_ENOSYS;
3145 break;
3146 }
3147 return ret;
3148 }
3149 #endif
3150
3151 /* kernel structure types definitions */
3152
3153 #define STRUCT(name, ...) STRUCT_ ## name,
3154 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3155 enum {
3156 #include "syscall_types.h"
3157 };
3158 #undef STRUCT
3159 #undef STRUCT_SPECIAL
3160
3161 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3162 #define STRUCT_SPECIAL(name)
3163 #include "syscall_types.h"
3164 #undef STRUCT
3165 #undef STRUCT_SPECIAL
3166
3167 typedef struct IOCTLEntry IOCTLEntry;
3168
3169 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3170 int fd, abi_long cmd, abi_long arg);
3171
3172 struct IOCTLEntry {
3173 unsigned int target_cmd;
3174 unsigned int host_cmd;
3175 const char *name;
3176 int access;
3177 do_ioctl_fn *do_ioctl;
3178 const argtype arg_type[5];
3179 };
3180
3181 #define IOC_R 0x0001
3182 #define IOC_W 0x0002
3183 #define IOC_RW (IOC_R | IOC_W)
3184
3185 #define MAX_STRUCT_SIZE 4096
3186
3187 #ifdef CONFIG_FIEMAP
3188 /* So fiemap access checks don't overflow on 32 bit systems.
3189 * This is very slightly smaller than the limit imposed by
3190 * the underlying kernel.
3191 */
3192 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3193 / sizeof(struct fiemap_extent))
3194
3195 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3196 int fd, abi_long cmd, abi_long arg)
3197 {
3198 /* The parameter for this ioctl is a struct fiemap followed
3199 * by an array of struct fiemap_extent whose size is set
3200 * in fiemap->fm_extent_count. The array is filled in by the
3201 * ioctl.
3202 */
3203 int target_size_in, target_size_out;
3204 struct fiemap *fm;
3205 const argtype *arg_type = ie->arg_type;
3206 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3207 void *argptr, *p;
3208 abi_long ret;
3209 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3210 uint32_t outbufsz;
3211 int free_fm = 0;
3212
3213 assert(arg_type[0] == TYPE_PTR);
3214 assert(ie->access == IOC_RW);
3215 arg_type++;
3216 target_size_in = thunk_type_size(arg_type, 0);
3217 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3218 if (!argptr) {
3219 return -TARGET_EFAULT;
3220 }
3221 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3222 unlock_user(argptr, arg, 0);
3223 fm = (struct fiemap *)buf_temp;
3224 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3225 return -TARGET_EINVAL;
3226 }
3227
3228 outbufsz = sizeof (*fm) +
3229 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3230
3231 if (outbufsz > MAX_STRUCT_SIZE) {
3232 /* We can't fit all the extents into the fixed size buffer.
3233 * Allocate one that is large enough and use it instead.
3234 */
3235 fm = malloc(outbufsz);
3236 if (!fm) {
3237 return -TARGET_ENOMEM;
3238 }
3239 memcpy(fm, buf_temp, sizeof(struct fiemap));
3240 free_fm = 1;
3241 }
3242 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3243 if (!is_error(ret)) {
3244 target_size_out = target_size_in;
3245 /* An extent_count of 0 means we were only counting the extents
3246 * so there are no structs to copy
3247 */
3248 if (fm->fm_extent_count != 0) {
3249 target_size_out += fm->fm_mapped_extents * extent_size;
3250 }
3251 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3252 if (!argptr) {
3253 ret = -TARGET_EFAULT;
3254 } else {
3255 /* Convert the struct fiemap */
3256 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3257 if (fm->fm_extent_count != 0) {
3258 p = argptr + target_size_in;
3259 /* ...and then all the struct fiemap_extents */
3260 for (i = 0; i < fm->fm_mapped_extents; i++) {
3261 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3262 THUNK_TARGET);
3263 p += extent_size;
3264 }
3265 }
3266 unlock_user(argptr, arg, target_size_out);
3267 }
3268 }
3269 if (free_fm) {
3270 free(fm);
3271 }
3272 return ret;
3273 }
3274 #endif
3275
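/* SIOCGIFCONF: the target and host struct ifreq layouts may differ in
 * size, so ifc_len is rescaled between the two representations and the
 * returned ifreq array is converted element by element through the
 * thunk machinery. */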
3276 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3277 int fd, abi_long cmd, abi_long arg)
3278 {
3279 const argtype *arg_type = ie->arg_type;
3280 int target_size;
3281 void *argptr;
3282 int ret;
3283 struct ifconf *host_ifconf;
3284 uint32_t outbufsz;
3285 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3286 int target_ifreq_size;
3287 int nb_ifreq;
3288 int free_buf = 0;
3289 int i;
3290 int target_ifc_len;
3291 abi_long target_ifc_buf;
3292 int host_ifc_len;
3293 char *host_ifc_buf;
3294
3295 assert(arg_type[0] == TYPE_PTR);
3296 assert(ie->access == IOC_RW);
3297
3298 arg_type++;
3299 target_size = thunk_type_size(arg_type, 0);
3300
3301 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3302 if (!argptr)
3303 return -TARGET_EFAULT;
3304 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3305 unlock_user(argptr, arg, 0);
3306
3307 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3308 target_ifc_len = host_ifconf->ifc_len;
3309 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3310
3311 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3312 nb_ifreq = target_ifc_len / target_ifreq_size;
3313 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3314
3315 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3316 if (outbufsz > MAX_STRUCT_SIZE) {
3317 /* We can't fit all the ifreq entries into the fixed size buffer.
3318 * Allocate one that is large enough and use it instead.
3319 */
3320 host_ifconf = malloc(outbufsz);
3321 if (!host_ifconf) {
3322 return -TARGET_ENOMEM;
3323 }
3324 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3325 free_buf = 1;
3326 }
3327 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3328
3329 host_ifconf->ifc_len = host_ifc_len;
3330 host_ifconf->ifc_buf = host_ifc_buf;
3331
3332 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3333 if (!is_error(ret)) {
3334 /* convert host ifc_len to target ifc_len */
3335
3336 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3337 target_ifc_len = nb_ifreq * target_ifreq_size;
3338 host_ifconf->ifc_len = target_ifc_len;
3339
3340 /* restore target ifc_buf */
3341
3342 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3343
3344 /* copy struct ifconf to target user */
3345
3346 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3347 if (!argptr)
3348 return -TARGET_EFAULT;
3349 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3350 unlock_user(argptr, arg, target_size);
3351
3352 /* copy ifreq[] to target user */
3353
3354 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3355 for (i = 0; i < nb_ifreq ; i++) {
3356 thunk_convert(argptr + i * target_ifreq_size,
3357 host_ifc_buf + i * sizeof(struct ifreq),
3358 ifreq_arg_type, THUNK_TARGET);
3359 }
3360 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3361 }
3362
3363 if (free_buf) {
3364 free(host_ifconf);
3365 }
3366
3367 return ret;
3368 }
3369
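/* Device-mapper ioctls: struct dm_ioctl is a fixed header followed by a
 * variable-length payload; data_start is the payload offset from the
 * start of the structure and data_size the overall size. The header is
 * converted through the thunk machinery, while the payload is copied or
 * converted by hand depending on the command. */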
3370 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3371 abi_long cmd, abi_long arg)
3372 {
3373 void *argptr;
3374 struct dm_ioctl *host_dm;
3375 abi_long guest_data;
3376 uint32_t guest_data_size;
3377 int target_size;
3378 const argtype *arg_type = ie->arg_type;
3379 abi_long ret;
3380 void *big_buf = NULL;
3381 char *host_data;
3382
3383 arg_type++;
3384 target_size = thunk_type_size(arg_type, 0);
3385 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3386 if (!argptr) {
3387 ret = -TARGET_EFAULT;
3388 goto out;
3389 }
3390 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3391 unlock_user(argptr, arg, 0);
3392
3393 /* buf_temp is too small, so fetch things into a bigger buffer */
3394 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3395 memcpy(big_buf, buf_temp, target_size);
3396 buf_temp = big_buf;
3397 host_dm = big_buf;
3398
3399 guest_data = arg + host_dm->data_start;
3400 if ((guest_data - arg) < 0) {
3401 ret = -EINVAL;
3402 goto out;
3403 }
3404 guest_data_size = host_dm->data_size - host_dm->data_start;
3405 host_data = (char*)host_dm + host_dm->data_start;
3406
3407 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3408 switch (ie->host_cmd) {
3409 case DM_REMOVE_ALL:
3410 case DM_LIST_DEVICES:
3411 case DM_DEV_CREATE:
3412 case DM_DEV_REMOVE:
3413 case DM_DEV_SUSPEND:
3414 case DM_DEV_STATUS:
3415 case DM_DEV_WAIT:
3416 case DM_TABLE_STATUS:
3417 case DM_TABLE_CLEAR:
3418 case DM_TABLE_DEPS:
3419 case DM_LIST_VERSIONS:
3420 /* no input data */
3421 break;
3422 case DM_DEV_RENAME:
3423 case DM_DEV_SET_GEOMETRY:
3424 /* data contains only strings */
3425 memcpy(host_data, argptr, guest_data_size);
3426 break;
3427 case DM_TARGET_MSG:
3428 memcpy(host_data, argptr, guest_data_size);
3429 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3430 break;
3431 case DM_TABLE_LOAD:
3432 {
3433 void *gspec = argptr;
3434 void *cur_data = host_data;
3435 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3436 int spec_size = thunk_type_size(arg_type, 0);
3437 int i;
3438
3439 for (i = 0; i < host_dm->target_count; i++) {
3440 struct dm_target_spec *spec = cur_data;
3441 uint32_t next;
3442 int slen;
3443
3444 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3445 slen = strlen((char*)gspec + spec_size) + 1;
3446 next = spec->next;
3447 spec->next = sizeof(*spec) + slen;
3448 strcpy((char*)&spec[1], gspec + spec_size);
3449 gspec += next;
3450 cur_data += spec->next;
3451 }
3452 break;
3453 }
3454 default:
3455 ret = -TARGET_EINVAL;
3456 goto out;
3457 }
3458 unlock_user(argptr, guest_data, 0);
3459
3460 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3461 if (!is_error(ret)) {
3462 guest_data = arg + host_dm->data_start;
3463 guest_data_size = host_dm->data_size - host_dm->data_start;
3464 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3465 switch (ie->host_cmd) {
3466 case DM_REMOVE_ALL:
3467 case DM_DEV_CREATE:
3468 case DM_DEV_REMOVE:
3469 case DM_DEV_RENAME:
3470 case DM_DEV_SUSPEND:
3471 case DM_DEV_STATUS:
3472 case DM_TABLE_LOAD:
3473 case DM_TABLE_CLEAR:
3474 case DM_TARGET_MSG:
3475 case DM_DEV_SET_GEOMETRY:
3476 /* no return data */
3477 break;
3478 case DM_LIST_DEVICES:
3479 {
3480 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3481 uint32_t remaining_data = guest_data_size;
3482 void *cur_data = argptr;
3483 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3484 int nl_size = 12; /* can't use thunk_type_size() due to alignment */
3485
3486 while (1) {
3487 uint32_t next = nl->next;
3488 if (next) {
3489 nl->next = nl_size + (strlen(nl->name) + 1);
3490 }
3491 if (remaining_data < nl->next) {
3492 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3493 break;
3494 }
3495 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3496 strcpy(cur_data + nl_size, nl->name);
3497 cur_data += nl->next;
3498 remaining_data -= nl->next;
3499 if (!next) {
3500 break;
3501 }
3502 nl = (void*)nl + next;
3503 }
3504 break;
3505 }
3506 case DM_DEV_WAIT:
3507 case DM_TABLE_STATUS:
3508 {
3509 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3510 void *cur_data = argptr;
3511 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3512 int spec_size = thunk_type_size(arg_type, 0);
3513 int i;
3514
3515 for (i = 0; i < host_dm->target_count; i++) {
3516 uint32_t next = spec->next;
3517 int slen = strlen((char*)&spec[1]) + 1;
3518 spec->next = (cur_data - argptr) + spec_size + slen;
3519 if (guest_data_size < spec->next) {
3520 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3521 break;
3522 }
3523 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3524 strcpy(cur_data + spec_size, (char*)&spec[1]);
3525 cur_data = argptr + spec->next;
3526 spec = (void*)host_dm + host_dm->data_start + next;
3527 }
3528 break;
3529 }
3530 case DM_TABLE_DEPS:
3531 {
3532 void *hdata = (void*)host_dm + host_dm->data_start;
3533 int count = *(uint32_t*)hdata;
3534 uint64_t *hdev = hdata + 8;
3535 uint64_t *gdev = argptr + 8;
3536 int i;
3537
3538 *(uint32_t*)argptr = tswap32(count);
3539 for (i = 0; i < count; i++) {
3540 *gdev = tswap64(*hdev);
3541 gdev++;
3542 hdev++;
3543 }
3544 break;
3545 }
3546 case DM_LIST_VERSIONS:
3547 {
3548 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3549 uint32_t remaining_data = guest_data_size;
3550 void *cur_data = argptr;
3551 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3552 int vers_size = thunk_type_size(arg_type, 0);
3553
3554 while (1) {
3555 uint32_t next = vers->next;
3556 if (next) {
3557 vers->next = vers_size + (strlen(vers->name) + 1);
3558 }
3559 if (remaining_data < vers->next) {
3560 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3561 break;
3562 }
3563 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3564 strcpy(cur_data + vers_size, vers->name);
3565 cur_data += vers->next;
3566 remaining_data -= vers->next;
3567 if (!next) {
3568 break;
3569 }
3570 vers = (void*)vers + next;
3571 }
3572 break;
3573 }
3574 default:
3575 ret = -TARGET_EINVAL;
3576 goto out;
3577 }
3578 unlock_user(argptr, guest_data, guest_data_size);
3579
3580 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3581 if (!argptr) {
3582 ret = -TARGET_EFAULT;
3583 goto out;
3584 }
3585 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3586 unlock_user(argptr, arg, target_size);
3587 }
3588 out:
3589 if (big_buf) {
3590 free(big_buf);
3591 }
3592 return ret;
3593 }
3594
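/* The ioctl dispatch table is generated from ioctls.h: IOCTL() maps a
 * target request number to the host number plus a thunk argument
 * description, and IOCTL_SPECIAL() additionally names a custom handler
 * such as do_ioctl_fs_ioc_fiemap(). The all-zero entry terminates the
 * table and serves as the "unsupported ioctl" sentinel in do_ioctl(). */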
3595 static IOCTLEntry ioctl_entries[] = {
3596 #define IOCTL(cmd, access, ...) \
3597 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3598 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3599 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3600 #include "ioctls.h"
3601 { 0, 0, },
3602 };
3603
3604 /* ??? Implement proper locking for ioctls. */
3605 /* do_ioctl() Must return target values and target errnos. */
3606 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3607 {
3608 const IOCTLEntry *ie;
3609 const argtype *arg_type;
3610 abi_long ret;
3611 uint8_t buf_temp[MAX_STRUCT_SIZE];
3612 int target_size;
3613 void *argptr;
3614
3615 ie = ioctl_entries;
3616 for(;;) {
3617 if (ie->target_cmd == 0) {
3618 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3619 return -TARGET_ENOSYS;
3620 }
3621 if (ie->target_cmd == cmd)
3622 break;
3623 ie++;
3624 }
3625 arg_type = ie->arg_type;
3626 #if defined(DEBUG)
3627 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3628 #endif
3629 if (ie->do_ioctl) {
3630 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3631 }
3632
3633 switch(arg_type[0]) {
3634 case TYPE_NULL:
3635 /* no argument */
3636 ret = get_errno(ioctl(fd, ie->host_cmd));
3637 break;
3638 case TYPE_PTRVOID:
3639 case TYPE_INT:
3640 /* int argument */
3641 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3642 break;
3643 case TYPE_PTR:
3644 arg_type++;
3645 target_size = thunk_type_size(arg_type, 0);
3646 switch(ie->access) {
3647 case IOC_R:
3648 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3649 if (!is_error(ret)) {
3650 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3651 if (!argptr)
3652 return -TARGET_EFAULT;
3653 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3654 unlock_user(argptr, arg, target_size);
3655 }
3656 break;
3657 case IOC_W:
3658 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3659 if (!argptr)
3660 return -TARGET_EFAULT;
3661 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3662 unlock_user(argptr, arg, 0);
3663 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3664 break;
3665 default:
3666 case IOC_RW:
3667 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3668 if (!argptr)
3669 return -TARGET_EFAULT;
3670 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3671 unlock_user(argptr, arg, 0);
3672 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3673 if (!is_error(ret)) {
3674 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3675 if (!argptr)
3676 return -TARGET_EFAULT;
3677 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3678 unlock_user(argptr, arg, target_size);
3679 }
3680 break;
3681 }
3682 break;
3683 default:
3684 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3685 (long)cmd, arg_type[0]);
3686 ret = -TARGET_ENOSYS;
3687 break;
3688 }
3689 return ret;
3690 }
3691
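/* Terminal flag translation tables. Each entry is, in order,
 * { target_mask, target_bits, host_mask, host_bits }: presumably the
 * generic target_to_host_bitmask()/host_to_target_bitmask() helpers test
 * the value masked with *_mask against *_bits and OR in the corresponding
 * bits of the other representation, as used by the termios converters
 * further below. */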
3692 static const bitmask_transtbl iflag_tbl[] = {
3693 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3694 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3695 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3696 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3697 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3698 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3699 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3700 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3701 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3702 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3703 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3704 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3705 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3706 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3707 { 0, 0, 0, 0 }
3708 };
3709
3710 static const bitmask_transtbl oflag_tbl[] = {
3711 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3712 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3713 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3714 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3715 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3716 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3717 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3718 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3719 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3720 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3721 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3722 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3723 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3724 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3725 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3726 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3727 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3728 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3729 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3730 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3731 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3732 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3733 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3734 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3735 { 0, 0, 0, 0 }
3736 };
3737
3738 static const bitmask_transtbl cflag_tbl[] = {
3739 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3740 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3741 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3742 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3743 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3744 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3745 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3746 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3747 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3748 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3749 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3750 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3751 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3752 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3753 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3754 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3755 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3756 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3757 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3758 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3759 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3760 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3761 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3762 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3763 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3764 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3765 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3766 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3767 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3768 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3769 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3770 { 0, 0, 0, 0 }
3771 };
3772
3773 static const bitmask_transtbl lflag_tbl[] = {
3774 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3775 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3776 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3777 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3778 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3779 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3780 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3781 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3782 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3783 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3784 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3785 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3786 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3787 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3788 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3789 { 0, 0, 0, 0 }
3790 };
3791
3792 static void target_to_host_termios (void *dst, const void *src)
3793 {
3794 struct host_termios *host = dst;
3795 const struct target_termios *target = src;
3796
3797 host->c_iflag =
3798 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3799 host->c_oflag =
3800 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3801 host->c_cflag =
3802 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3803 host->c_lflag =
3804 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3805 host->c_line = target->c_line;
3806
3807 memset(host->c_cc, 0, sizeof(host->c_cc));
3808 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3809 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3810 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3811 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3812 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3813 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3814 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3815 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3816 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3817 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3818 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3819 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3820 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3821 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3822 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3823 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3824 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3825 }
3826
3827 static void host_to_target_termios (void *dst, const void *src)
3828 {
3829 struct target_termios *target = dst;
3830 const struct host_termios *host = src;
3831
3832 target->c_iflag =
3833 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3834 target->c_oflag =
3835 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3836 target->c_cflag =
3837 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3838 target->c_lflag =
3839 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3840 target->c_line = host->c_line;
3841
3842 memset(target->c_cc, 0, sizeof(target->c_cc));
3843 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3844 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3845 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3846 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3847 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3848 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3849 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3850 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3851 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3852 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3853 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3854 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3855 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3856 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3857 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3858 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3859 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3860 }
3861
3862 static const StructEntry struct_termios_def = {
3863 .convert = { host_to_target_termios, target_to_host_termios },
3864 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3865 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3866 };
3867
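/* Note on the translation tables used here: each bitmask_transtbl entry is
   { target_mask, target_bits, host_mask, host_bits }.  When the masked
   target value equals target_bits the corresponding host_bits are set, and
   vice versa for the host-to-target direction; this handles flags whose
   numeric values differ between target and host. */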
3868 static bitmask_transtbl mmap_flags_tbl[] = {
3869 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3870 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3871 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3872 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3873 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3874 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3875 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3876 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3877 { 0, 0, 0, 0 }
3878 };
3879
3880 #if defined(TARGET_I386)
3881
3882 /* NOTE: there is really only one LDT, shared by all threads */
3883 static uint8_t *ldt_table;
3884
3885 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3886 {
3887 int size;
3888 void *p;
3889
3890 if (!ldt_table)
3891 return 0;
3892 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3893 if (size > bytecount)
3894 size = bytecount;
3895 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3896 if (!p)
3897 return -TARGET_EFAULT;
3898 /* ??? Should this be byteswapped? */
3899 memcpy(p, ldt_table, size);
3900 unlock_user(p, ptr, size);
3901 return size;
3902 }
3903
3904 /* XXX: add locking support */
3905 static abi_long write_ldt(CPUX86State *env,
3906 abi_ulong ptr, unsigned long bytecount, int oldmode)
3907 {
3908 struct target_modify_ldt_ldt_s ldt_info;
3909 struct target_modify_ldt_ldt_s *target_ldt_info;
3910 int seg_32bit, contents, read_exec_only, limit_in_pages;
3911 int seg_not_present, useable, lm;
3912 uint32_t *lp, entry_1, entry_2;
3913
3914 if (bytecount != sizeof(ldt_info))
3915 return -TARGET_EINVAL;
3916 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3917 return -TARGET_EFAULT;
3918 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3919 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3920 ldt_info.limit = tswap32(target_ldt_info->limit);
3921 ldt_info.flags = tswap32(target_ldt_info->flags);
3922 unlock_user_struct(target_ldt_info, ptr, 0);
3923
3924 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3925 return -TARGET_EINVAL;
3926 seg_32bit = ldt_info.flags & 1;
3927 contents = (ldt_info.flags >> 1) & 3;
3928 read_exec_only = (ldt_info.flags >> 3) & 1;
3929 limit_in_pages = (ldt_info.flags >> 4) & 1;
3930 seg_not_present = (ldt_info.flags >> 5) & 1;
3931 useable = (ldt_info.flags >> 6) & 1;
3932 #ifdef TARGET_ABI32
3933 lm = 0;
3934 #else
3935 lm = (ldt_info.flags >> 7) & 1;
3936 #endif
3937 if (contents == 3) {
3938 if (oldmode)
3939 return -TARGET_EINVAL;
3940 if (seg_not_present == 0)
3941 return -TARGET_EINVAL;
3942 }
3943 /* allocate the LDT */
3944 if (!ldt_table) {
3945 env->ldt.base = target_mmap(0,
3946 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3947 PROT_READ|PROT_WRITE,
3948 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3949 if (env->ldt.base == -1)
3950 return -TARGET_ENOMEM;
3951 memset(g2h(env->ldt.base), 0,
3952 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3953 env->ldt.limit = 0xffff;
3954 ldt_table = g2h(env->ldt.base);
3955 }
3956
3957 /* NOTE: same code as Linux kernel */
3958 /* Allow LDTs to be cleared by the user. */
3959 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3960 if (oldmode ||
3961 (contents == 0 &&
3962 read_exec_only == 1 &&
3963 seg_32bit == 0 &&
3964 limit_in_pages == 0 &&
3965 seg_not_present == 1 &&
3966 useable == 0 )) {
3967 entry_1 = 0;
3968 entry_2 = 0;
3969 goto install;
3970 }
3971 }
3972
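/* Pack the fields into an x86 segment descriptor: entry_1 holds base[15:0]
   and limit[15:0]; entry_2 holds base[31:24] and base[23:16], limit[19:16],
   and the attribute bits (writable = !read_exec_only, present =
   !seg_not_present, type = contents, DPL = 3 via 0x7000, plus the
   AVL/L/D/G flags from useable, lm, seg_32bit and limit_in_pages). */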
3973 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3974 (ldt_info.limit & 0x0ffff);
3975 entry_2 = (ldt_info.base_addr & 0xff000000) |
3976 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3977 (ldt_info.limit & 0xf0000) |
3978 ((read_exec_only ^ 1) << 9) |
3979 (contents << 10) |
3980 ((seg_not_present ^ 1) << 15) |
3981 (seg_32bit << 22) |
3982 (limit_in_pages << 23) |
3983 (lm << 21) |
3984 0x7000;
3985 if (!oldmode)
3986 entry_2 |= (useable << 20);
3987
3988 /* Install the new entry ... */
3989 install:
3990 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3991 lp[0] = tswap32(entry_1);
3992 lp[1] = tswap32(entry_2);
3993 return 0;
3994 }
3995
3996 /* specific and weird i386 syscalls */
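/* func selects the operation, mirroring the kernel's modify_ldt():
   0 = read the LDT, 1 = write an entry (old interface),
   0x11 = write an entry (new interface). */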
3997 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3998 unsigned long bytecount)
3999 {
4000 abi_long ret;
4001
4002 switch (func) {
4003 case 0:
4004 ret = read_ldt(ptr, bytecount);
4005 break;
4006 case 1:
4007 ret = write_ldt(env, ptr, bytecount, 1);
4008 break;
4009 case 0x11:
4010 ret = write_ldt(env, ptr, bytecount, 0);
4011 break;
4012 default:
4013 ret = -TARGET_ENOSYS;
4014 break;
4015 }
4016 return ret;
4017 }
4018
4019 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4020 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4021 {
4022 uint64_t *gdt_table = g2h(env->gdt.base);
4023 struct target_modify_ldt_ldt_s ldt_info;
4024 struct target_modify_ldt_ldt_s *target_ldt_info;
4025 int seg_32bit, contents, read_exec_only, limit_in_pages;
4026 int seg_not_present, useable, lm;
4027 uint32_t *lp, entry_1, entry_2;
4028 int i;
4029
4030 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4031 if (!target_ldt_info)
4032 return -TARGET_EFAULT;
4033 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4034 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4035 ldt_info.limit = tswap32(target_ldt_info->limit);
4036 ldt_info.flags = tswap32(target_ldt_info->flags);
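/* An entry_number of -1 asks us to allocate a free TLS slot in the GDT and
   report the chosen slot back to the guest, as the kernel's
   set_thread_area() does. */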
4037 if (ldt_info.entry_number == -1) {
4038 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4039 if (gdt_table[i] == 0) {
4040 ldt_info.entry_number = i;
4041 target_ldt_info->entry_number = tswap32(i);
4042 break;
4043 }
4044 }
4045 }
4046 unlock_user_struct(target_ldt_info, ptr, 1);
4047
4048 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4049 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4050 return -TARGET_EINVAL;
4051 seg_32bit = ldt_info.flags & 1;
4052 contents = (ldt_info.flags >> 1) & 3;
4053 read_exec_only = (ldt_info.flags >> 3) & 1;
4054 limit_in_pages = (ldt_info.flags >> 4) & 1;
4055 seg_not_present = (ldt_info.flags >> 5) & 1;
4056 useable = (ldt_info.flags >> 6) & 1;
4057 #ifdef TARGET_ABI32
4058 lm = 0;
4059 #else
4060 lm = (ldt_info.flags >> 7) & 1;
4061 #endif
4062
4063 if (contents == 3) {
4064 if (seg_not_present == 0)
4065 return -TARGET_EINVAL;
4066 }
4067
4068 /* NOTE: same code as Linux kernel */
4069 /* Allow LDTs to be cleared by the user. */
4070 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4071 if ((contents == 0 &&
4072 read_exec_only == 1 &&
4073 seg_32bit == 0 &&
4074 limit_in_pages == 0 &&
4075 seg_not_present == 1 &&
4076 useable == 0 )) {
4077 entry_1 = 0;
4078 entry_2 = 0;
4079 goto install;
4080 }
4081 }
4082
4083 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4084 (ldt_info.limit & 0x0ffff);
4085 entry_2 = (ldt_info.base_addr & 0xff000000) |
4086 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4087 (ldt_info.limit & 0xf0000) |
4088 ((read_exec_only ^ 1) << 9) |
4089 (contents << 10) |
4090 ((seg_not_present ^ 1) << 15) |
4091 (seg_32bit << 22) |
4092 (limit_in_pages << 23) |
4093 (useable << 20) |
4094 (lm << 21) |
4095 0x7000;
4096
4097 /* Install the new entry ... */
4098 install:
4099 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4100 lp[0] = tswap32(entry_1);
4101 lp[1] = tswap32(entry_2);
4102 return 0;
4103 }
4104
4105 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4106 {
4107 struct target_modify_ldt_ldt_s *target_ldt_info;
4108 uint64_t *gdt_table = g2h(env->gdt.base);
4109 uint32_t base_addr, limit, flags;
4110 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4111 int seg_not_present, useable, lm;
4112 uint32_t *lp, entry_1, entry_2;
4113
4114 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4115 if (!target_ldt_info)
4116 return -TARGET_EFAULT;
4117 idx = tswap32(target_ldt_info->entry_number);
4118 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4119 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4120 unlock_user_struct(target_ldt_info, ptr, 1);
4121 return -TARGET_EINVAL;
4122 }
4123 lp = (uint32_t *)(gdt_table + idx);
4124 entry_1 = tswap32(lp[0]);
4125 entry_2 = tswap32(lp[1]);
4126
4127 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4128 contents = (entry_2 >> 10) & 3;
4129 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4130 seg_32bit = (entry_2 >> 22) & 1;
4131 limit_in_pages = (entry_2 >> 23) & 1;
4132 useable = (entry_2 >> 20) & 1;
4133 #ifdef TARGET_ABI32
4134 lm = 0;
4135 #else
4136 lm = (entry_2 >> 21) & 1;
4137 #endif
4138 flags = (seg_32bit << 0) | (contents << 1) |
4139 (read_exec_only << 3) | (limit_in_pages << 4) |
4140 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4141 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4142 base_addr = (entry_1 >> 16) |
4143 (entry_2 & 0xff000000) |
4144 ((entry_2 & 0xff) << 16);
4145 target_ldt_info->base_addr = tswapal(base_addr);
4146 target_ldt_info->limit = tswap32(limit);
4147 target_ldt_info->flags = tswap32(flags);
4148 unlock_user_struct(target_ldt_info, ptr, 1);
4149 return 0;
4150 }
4151 #endif /* TARGET_I386 && TARGET_ABI32 */
4152
4153 #ifndef TARGET_ABI32
4154 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4155 {
4156 abi_long ret = 0;
4157 abi_ulong val;
4158 int idx;
4159
4160 switch(code) {
4161 case TARGET_ARCH_SET_GS:
4162 case TARGET_ARCH_SET_FS:
4163 if (code == TARGET_ARCH_SET_GS)
4164 idx = R_GS;
4165 else
4166 idx = R_FS;
4167 cpu_x86_load_seg(env, idx, 0);
4168 env->segs[idx].base = addr;
4169 break;
4170 case TARGET_ARCH_GET_GS:
4171 case TARGET_ARCH_GET_FS:
4172 if (code == TARGET_ARCH_GET_GS)
4173 idx = R_GS;
4174 else
4175 idx = R_FS;
4176 val = env->segs[idx].base;
4177 if (put_user(val, addr, abi_ulong))
4178 ret = -TARGET_EFAULT;
4179 break;
4180 default:
4181 ret = -TARGET_EINVAL;
4182 break;
4183 }
4184 return ret;
4185 }
4186 #endif
4187
4188 #endif /* defined(TARGET_I386) */
4189
4190 #define NEW_STACK_SIZE 0x40000
4191
4192 #if defined(CONFIG_USE_NPTL)
4193
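/* Thread-creation handshake: the parent fills in a new_thread_info, holds
   clone_lock while it finishes setting up the new CPU state and TLS, and
   waits on info.cond until clone_func() has published the child's TID.
   clone_lock also keeps the child from entering cpu_loop() before the
   parent's setup is complete. */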
4194 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4195 typedef struct {
4196 CPUArchState *env;
4197 pthread_mutex_t mutex;
4198 pthread_cond_t cond;
4199 pthread_t thread;
4200 uint32_t tid;
4201 abi_ulong child_tidptr;
4202 abi_ulong parent_tidptr;
4203 sigset_t sigmask;
4204 } new_thread_info;
4205
4206 static void *clone_func(void *arg)
4207 {
4208 new_thread_info *info = arg;
4209 CPUArchState *env;
4210 TaskState *ts;
4211
4212 env = info->env;
4213 thread_env = env;
4214 ts = (TaskState *)thread_env->opaque;
4215 info->tid = gettid();
4216 env->host_tid = info->tid;
4217 task_settid(ts);
4218 if (info->child_tidptr)
4219 put_user_u32(info->tid, info->child_tidptr);
4220 if (info->parent_tidptr)
4221 put_user_u32(info->tid, info->parent_tidptr);
4222 /* Enable signals. */
4223 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4224 /* Signal to the parent that we're ready. */
4225 pthread_mutex_lock(&info->mutex);
4226 pthread_cond_broadcast(&info->cond);
4227 pthread_mutex_unlock(&info->mutex);
4228 /* Wait until the parent has finished initializing the TLS state. */
4229 pthread_mutex_lock(&clone_lock);
4230 pthread_mutex_unlock(&clone_lock);
4231 cpu_loop(env);
4232 /* never exits */
4233 return NULL;
4234 }
4235 #else
4236
4237 static int clone_func(void *arg)
4238 {
4239 CPUArchState *env = arg;
4240 cpu_loop(env);
4241 /* never exits */
4242 return 0;
4243 }
4244 #endif
4245
4246 /* do_fork() must return host values and target errnos (unlike most
4247 do_*() functions). */
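/* Two cases: with CLONE_VM the new task shares the address space, so it is
   created as a host thread (pthread_create) running its own CPU state;
   without CLONE_VM it is an ordinary process and we fall back to a host
   fork(). */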
4248 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4249 abi_ulong parent_tidptr, target_ulong newtls,
4250 abi_ulong child_tidptr)
4251 {
4252 int ret;
4253 TaskState *ts;
4254 CPUArchState *new_env;
4255 #if defined(CONFIG_USE_NPTL)
4256 unsigned int nptl_flags;
4257 sigset_t sigmask;
4258 #else
4259 uint8_t *new_stack;
4260 #endif
4261
4262 /* Emulate vfork() with fork() */
4263 if (flags & CLONE_VFORK)
4264 flags &= ~(CLONE_VFORK | CLONE_VM);
4265
4266 if (flags & CLONE_VM) {
4267 TaskState *parent_ts = (TaskState *)env->opaque;
4268 #if defined(CONFIG_USE_NPTL)
4269 new_thread_info info;
4270 pthread_attr_t attr;
4271 #endif
4272 ts = g_malloc0(sizeof(TaskState));
4273 init_task_state(ts);
4274 /* we create a new CPU instance. */
4275 new_env = cpu_copy(env);
4276 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4277 cpu_reset(ENV_GET_CPU(new_env));
4278 #endif
4279 /* Init regs that differ from the parent. */
4280 cpu_clone_regs(new_env, newsp);
4281 new_env->opaque = ts;
4282 ts->bprm = parent_ts->bprm;
4283 ts->info = parent_ts->info;
4284 #if defined(CONFIG_USE_NPTL)
4285 nptl_flags = flags;
4286 flags &= ~CLONE_NPTL_FLAGS2;
4287
4288 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4289 ts->child_tidptr = child_tidptr;
4290 }
4291
4292 if (nptl_flags & CLONE_SETTLS)
4293 cpu_set_tls (new_env, newtls);
4294
4295 /* Grab a mutex so that thread setup appears atomic. */
4296 pthread_mutex_lock(&clone_lock);
4297
4298 memset(&info, 0, sizeof(info));
4299 pthread_mutex_init(&info.mutex, NULL);
4300 pthread_mutex_lock(&info.mutex);
4301 pthread_cond_init(&info.cond, NULL);
4302 info.env = new_env;
4303 if (nptl_flags & CLONE_CHILD_SETTID)
4304 info.child_tidptr = child_tidptr;
4305 if (nptl_flags & CLONE_PARENT_SETTID)
4306 info.parent_tidptr = parent_tidptr;
4307
4308 ret = pthread_attr_init(&attr);
4309 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4310 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4311 /* It is not safe to deliver signals until the child has finished
4312 initializing, so temporarily block all signals. */
4313 sigfillset(&sigmask);
4314 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4315
4316 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4317 /* TODO: Free new CPU state if thread creation failed. */
4318
4319 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4320 pthread_attr_destroy(&attr);
4321 if (ret == 0) {
4322 /* Wait for the child to initialize. */
4323 pthread_cond_wait(&info.cond, &info.mutex);
4324 ret = info.tid;
4325 if (flags & CLONE_PARENT_SETTID)
4326 put_user_u32(ret, parent_tidptr);
4327 } else {
4328 ret = -1;
4329 }
4330 pthread_mutex_unlock(&info.mutex);
4331 pthread_cond_destroy(&info.cond);
4332 pthread_mutex_destroy(&info.mutex);
4333 pthread_mutex_unlock(&clone_lock);
4334 #else
4335 if (flags & CLONE_NPTL_FLAGS2)
4336 return -EINVAL;
4337 /* This is probably going to die very quickly, but do it anyway. */
4338 new_stack = g_malloc0 (NEW_STACK_SIZE);
4339 #ifdef __ia64__
4340 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4341 #else
4342 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4343 #endif
4344 #endif
4345 } else {
4346 /* if CLONE_VM is not set, we treat this as a plain fork */
4347 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4348 return -EINVAL;
4349 fork_start();
4350 ret = fork();
4351 if (ret == 0) {
4352 /* Child Process. */
4353 cpu_clone_regs(env, newsp);
4354 fork_end(1);
4355 #if defined(CONFIG_USE_NPTL)
4356 /* There is a race condition here. The parent process could
4357 theoretically read the TID in the child process before the child
4358 tid is set. This would require using either ptrace
4359 (not implemented) or having *_tidptr point at a shared memory
4360 mapping. We can't repeat the spinlock hack used above because
4361 the child process gets its own copy of the lock. */
4362 if (flags & CLONE_CHILD_SETTID)
4363 put_user_u32(gettid(), child_tidptr);
4364 if (flags & CLONE_PARENT_SETTID)
4365 put_user_u32(gettid(), parent_tidptr);
4366 ts = (TaskState *)env->opaque;
4367 if (flags & CLONE_SETTLS)
4368 cpu_set_tls (env, newtls);
4369 if (flags & CLONE_CHILD_CLEARTID)
4370 ts->child_tidptr = child_tidptr;
4371 #endif
4372 } else {
4373 fork_end(0);
4374 }
4375 }
4376 return ret;
4377 }
4378
4379 /* warning: does not handle Linux-specific flags... */
4380 static int target_to_host_fcntl_cmd(int cmd)
4381 {
4382 switch(cmd) {
4383 case TARGET_F_DUPFD:
4384 case TARGET_F_GETFD:
4385 case TARGET_F_SETFD:
4386 case TARGET_F_GETFL:
4387 case TARGET_F_SETFL:
4388 return cmd;
4389 case TARGET_F_GETLK:
4390 return F_GETLK;
4391 case TARGET_F_SETLK:
4392 return F_SETLK;
4393 case TARGET_F_SETLKW:
4394 return F_SETLKW;
4395 case TARGET_F_GETOWN:
4396 return F_GETOWN;
4397 case TARGET_F_SETOWN:
4398 return F_SETOWN;
4399 case TARGET_F_GETSIG:
4400 return F_GETSIG;
4401 case TARGET_F_SETSIG:
4402 return F_SETSIG;
4403 #if TARGET_ABI_BITS == 32
4404 case TARGET_F_GETLK64:
4405 return F_GETLK64;
4406 case TARGET_F_SETLK64:
4407 return F_SETLK64;
4408 case TARGET_F_SETLKW64:
4409 return F_SETLKW64;
4410 #endif
4411 case TARGET_F_SETLEASE:
4412 return F_SETLEASE;
4413 case TARGET_F_GETLEASE:
4414 return F_GETLEASE;
4415 #ifdef F_DUPFD_CLOEXEC
4416 case TARGET_F_DUPFD_CLOEXEC:
4417 return F_DUPFD_CLOEXEC;
4418 #endif
4419 case TARGET_F_NOTIFY:
4420 return F_NOTIFY;
4421 default:
4422 return -TARGET_EINVAL;
4423 }
4424 return -TARGET_EINVAL;
4425 }
4426
4427 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4428 {
4429 struct flock fl;
4430 struct target_flock *target_fl;
4431 struct flock64 fl64;
4432 struct target_flock64 *target_fl64;
4433 abi_long ret;
4434 int host_cmd = target_to_host_fcntl_cmd(cmd);
4435
4436 if (host_cmd == -TARGET_EINVAL)
4437 return host_cmd;
4438
4439 switch(cmd) {
4440 case TARGET_F_GETLK:
4441 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4442 return -TARGET_EFAULT;
4443 fl.l_type = tswap16(target_fl->l_type);
4444 fl.l_whence = tswap16(target_fl->l_whence);
4445 fl.l_start = tswapal(target_fl->l_start);
4446 fl.l_len = tswapal(target_fl->l_len);
4447 fl.l_pid = tswap32(target_fl->l_pid);
4448 unlock_user_struct(target_fl, arg, 0);
4449 ret = get_errno(fcntl(fd, host_cmd, &fl));
4450 if (ret == 0) {
4451 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4452 return -TARGET_EFAULT;
4453 target_fl->l_type = tswap16(fl.l_type);
4454 target_fl->l_whence = tswap16(fl.l_whence);
4455 target_fl->l_start = tswapal(fl.l_start);
4456 target_fl->l_len = tswapal(fl.l_len);
4457 target_fl->l_pid = tswap32(fl.l_pid);
4458 unlock_user_struct(target_fl, arg, 1);
4459 }
4460 break;
4461
4462 case TARGET_F_SETLK:
4463 case TARGET_F_SETLKW:
4464 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4465 return -TARGET_EFAULT;
4466 fl.l_type = tswap16(target_fl->l_type);
4467 fl.l_whence = tswap16(target_fl->l_whence);
4468 fl.l_start = tswapal(target_fl->l_start);
4469 fl.l_len = tswapal(target_fl->l_len);
4470 fl.l_pid = tswap32(target_fl->l_pid);
4471 unlock_user_struct(target_fl, arg, 0);
4472 ret = get_errno(fcntl(fd, host_cmd, &fl));
4473 break;
4474
4475 case TARGET_F_GETLK64:
4476 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4477 return -TARGET_EFAULT;
4478 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4479 fl64.l_whence = tswap16(target_fl64->l_whence);
4480 fl64.l_start = tswap64(target_fl64->l_start);
4481 fl64.l_len = tswap64(target_fl64->l_len);
4482 fl64.l_pid = tswap32(target_fl64->l_pid);
4483 unlock_user_struct(target_fl64, arg, 0);
4484 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4485 if (ret == 0) {
4486 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4487 return -TARGET_EFAULT;
4488 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4489 target_fl64->l_whence = tswap16(fl64.l_whence);
4490 target_fl64->l_start = tswap64(fl64.l_start);
4491 target_fl64->l_len = tswap64(fl64.l_len);
4492 target_fl64->l_pid = tswap32(fl64.l_pid);
4493 unlock_user_struct(target_fl64, arg, 1);
4494 }
4495 break;
4496 case TARGET_F_SETLK64:
4497 case TARGET_F_SETLKW64:
4498 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4499 return -TARGET_EFAULT;
4500 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4501 fl64.l_whence = tswap16(target_fl64->l_whence);
4502 fl64.l_start = tswap64(target_fl64->l_start);
4503 fl64.l_len = tswap64(target_fl64->l_len);
4504 fl64.l_pid = tswap32(target_fl64->l_pid);
4505 unlock_user_struct(target_fl64, arg, 0);
4506 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4507 break;
4508
4509 case TARGET_F_GETFL:
4510 ret = get_errno(fcntl(fd, host_cmd, arg));
4511 if (ret >= 0) {
4512 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4513 }
4514 break;
4515
4516 case TARGET_F_SETFL:
4517 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4518 break;
4519
4520 case TARGET_F_SETOWN:
4521 case TARGET_F_GETOWN:
4522 case TARGET_F_SETSIG:
4523 case TARGET_F_GETSIG:
4524 case TARGET_F_SETLEASE:
4525 case TARGET_F_GETLEASE:
4526 ret = get_errno(fcntl(fd, host_cmd, arg));
4527 break;
4528
4529 default:
4530 ret = get_errno(fcntl(fd, cmd, arg));
4531 break;
4532 }
4533 return ret;
4534 }
4535
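/* Helpers for the 16-bit UID/GID syscall variants: IDs above 65535 are
   clamped to the overflow ID 65534, and a low 16-bit value of -1 is
   preserved as -1 (the "unchanged" marker used by setreuid() and friends). */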
4536 #ifdef USE_UID16
4537
4538 static inline int high2lowuid(int uid)
4539 {
4540 if (uid > 65535)
4541 return 65534;
4542 else
4543 return uid;
4544 }
4545
4546 static inline int high2lowgid(int gid)
4547 {
4548 if (gid > 65535)
4549 return 65534;
4550 else
4551 return gid;
4552 }
4553
4554 static inline int low2highuid(int uid)
4555 {
4556 if ((int16_t)uid == -1)
4557 return -1;
4558 else
4559 return uid;
4560 }
4561
4562 static inline int low2highgid(int gid)
4563 {
4564 if ((int16_t)gid == -1)
4565 return -1;
4566 else
4567 return gid;
4568 }
4569 static inline int tswapid(int id)
4570 {
4571 return tswap16(id);
4572 }
4573 #else /* !USE_UID16 */
4574 static inline int high2lowuid(int uid)
4575 {
4576 return uid;
4577 }
4578 static inline int high2lowgid(int gid)
4579 {
4580 return gid;
4581 }
4582 static inline int low2highuid(int uid)
4583 {
4584 return uid;
4585 }
4586 static inline int low2highgid(int gid)
4587 {
4588 return gid;
4589 }
4590 static inline int tswapid(int id)
4591 {
4592 return tswap32(id);
4593 }
4594 #endif /* USE_UID16 */
4595
4596 void syscall_init(void)
4597 {
4598 IOCTLEntry *ie;
4599 const argtype *arg_type;
4600 int size;
4601 int i;
4602
4603 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4604 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4605 #include "syscall_types.h"
4606 #undef STRUCT
4607 #undef STRUCT_SPECIAL
4608
4609 /* Build target_to_host_errno_table[] table from
4610 * host_to_target_errno_table[]. */
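/* This assumes every value stored in host_to_target_errno_table[] is itself
   below ERRNO_TABLE_SIZE, since it is used directly as an index here. */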
4611 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4612 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4613 }
4614
4615 /* We patch the ioctl size if necessary. We rely on the fact that
4616 no ioctl has all bits set to '1' in its size field. */
4617 ie = ioctl_entries;
4618 while (ie->target_cmd != 0) {
4619 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4620 TARGET_IOC_SIZEMASK) {
4621 arg_type = ie->arg_type;
4622 if (arg_type[0] != TYPE_PTR) {
4623 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4624 ie->target_cmd);
4625 exit(1);
4626 }
4627 arg_type++;
4628 size = thunk_type_size(arg_type, 0);
4629 ie->target_cmd = (ie->target_cmd &
4630 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4631 (size << TARGET_IOC_SIZESHIFT);
4632 }
4633
4634 /* automatic consistency check if same arch */
4635 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4636 (defined(__x86_64__) && defined(TARGET_X86_64))
4637 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4638 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4639 ie->name, ie->target_cmd, ie->host_cmd);
4640 }
4641 #endif
4642 ie++;
4643 }
4644 }
4645
4646 #if TARGET_ABI_BITS == 32
4647 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4648 {
4649 #ifdef TARGET_WORDS_BIGENDIAN
4650 return ((uint64_t)word0 << 32) | word1;
4651 #else
4652 return ((uint64_t)word1 << 32) | word0;
4653 #endif
4654 }
4655 #else /* TARGET_ABI_BITS == 32 */
4656 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4657 {
4658 return word0;
4659 }
4660 #endif /* TARGET_ABI_BITS != 32 */
4661
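/* Some 32-bit ABIs require 64-bit syscall arguments to start in an
   even-numbered register; regpairs_aligned() reports this, in which case a
   padding argument is skipped and the offset halves shift up by one slot. */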
4662 #ifdef TARGET_NR_truncate64
4663 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4664 abi_long arg2,
4665 abi_long arg3,
4666 abi_long arg4)
4667 {
4668 if (regpairs_aligned(cpu_env)) {
4669 arg2 = arg3;
4670 arg3 = arg4;
4671 }
4672 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4673 }
4674 #endif
4675
4676 #ifdef TARGET_NR_ftruncate64
4677 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4678 abi_long arg2,
4679 abi_long arg3,
4680 abi_long arg4)
4681 {
4682 if (regpairs_aligned(cpu_env)) {
4683 arg2 = arg3;
4684 arg3 = arg4;
4685 }
4686 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4687 }
4688 #endif
4689
4690 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4691 abi_ulong target_addr)
4692 {
4693 struct target_timespec *target_ts;
4694
4695 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4696 return -TARGET_EFAULT;
4697 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4698 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4699 unlock_user_struct(target_ts, target_addr, 0);
4700 return 0;
4701 }
4702
4703 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4704 struct timespec *host_ts)
4705 {
4706 struct target_timespec *target_ts;
4707
4708 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4709 return -TARGET_EFAULT;
4710 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4711 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4712 unlock_user_struct(target_ts, target_addr, 1);
4713 return 0;
4714 }
4715
4716 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4717 static inline abi_long host_to_target_stat64(void *cpu_env,
4718 abi_ulong target_addr,
4719 struct stat *host_st)
4720 {
4721 #ifdef TARGET_ARM
4722 if (((CPUARMState *)cpu_env)->eabi) {
4723 struct target_eabi_stat64 *target_st;
4724
4725 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4726 return -TARGET_EFAULT;
4727 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4728 __put_user(host_st->st_dev, &target_st->st_dev);
4729 __put_user(host_st->st_ino, &target_st->st_ino);
4730 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4731 __put_user(host_st->st_ino, &target_st->__st_ino);
4732 #endif
4733 __put_user(host_st->st_mode, &target_st->st_mode);
4734 __put_user(host_st->st_nlink, &target_st->st_nlink);
4735 __put_user(host_st->st_uid, &target_st->st_uid);
4736 __put_user(host_st->st_gid, &target_st->st_gid);
4737 __put_user(host_st->st_rdev, &target_st->st_rdev);
4738 __put_user(host_st->st_size, &target_st->st_size);
4739 __put_user(host_st->st_blksize, &target_st->st_blksize);
4740 __put_user(host_st->st_blocks, &target_st->st_blocks);
4741 __put_user(host_st->st_atime, &target_st->target_st_atime);
4742 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4743 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4744 unlock_user_struct(target_st, target_addr, 1);
4745 } else
4746 #endif
4747 {
4748 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4749 struct target_stat *target_st;
4750 #else
4751 struct target_stat64 *target_st;
4752 #endif
4753
4754 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4755 return -TARGET_EFAULT;
4756 memset(target_st, 0, sizeof(*target_st));
4757 __put_user(host_st->st_dev, &target_st->st_dev);
4758 __put_user(host_st->st_ino, &target_st->st_ino);
4759 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4760 __put_user(host_st->st_ino, &target_st->__st_ino);
4761 #endif
4762 __put_user(host_st->st_mode, &target_st->st_mode);
4763 __put_user(host_st->st_nlink, &target_st->st_nlink);
4764 __put_user(host_st->st_uid, &target_st->st_uid);
4765 __put_user(host_st->st_gid, &target_st->st_gid);
4766 __put_user(host_st->st_rdev, &target_st->st_rdev);
4767 /* XXX: better use of kernel struct */
4768 __put_user(host_st->st_size, &target_st->st_size);
4769 __put_user(host_st->st_blksize, &target_st->st_blksize);
4770 __put_user(host_st->st_blocks, &target_st->st_blocks);
4771 __put_user(host_st->st_atime, &target_st->target_st_atime);
4772 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4773 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4774 unlock_user_struct(target_st, target_addr, 1);
4775 }
4776
4777 return 0;
4778 }
4779 #endif
4780
4781 #if defined(CONFIG_USE_NPTL)
4782 /* ??? Using host futex calls even when target atomic operations
4783 are not really atomic probably breaks things. However, implementing
4784 futexes locally would make futexes shared between multiple processes
4785 tricky. Then again, such shared futexes are probably useless anyway,
4786 because guest atomic operations won't work either. */
4787 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4788 target_ulong uaddr2, int val3)
4789 {
4790 struct timespec ts, *pts;
4791 int base_op;
4792
4793 /* ??? We assume FUTEX_* constants are the same on both host
4794 and target. */
4795 #ifdef FUTEX_CMD_MASK
4796 base_op = op & FUTEX_CMD_MASK;
4797 #else
4798 base_op = op;
4799 #endif
4800 switch (base_op) {
4801 case FUTEX_WAIT:
4802 if (timeout) {
4803 pts = &ts;
4804 target_to_host_timespec(pts, timeout);
4805 } else {
4806 pts = NULL;
4807 }
4808 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4809 pts, NULL, 0));
4810 case FUTEX_WAKE:
4811 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4812 case FUTEX_FD:
4813 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4814 case FUTEX_REQUEUE:
4815 case FUTEX_CMP_REQUEUE:
4816 case FUTEX_WAKE_OP:
4817 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4818 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4819 But the prototype takes a `struct timespec *'; insert casts
4820 to satisfy the compiler. We do not need to tswap TIMEOUT
4821 since it's not compared to guest memory. */
4822 pts = (struct timespec *)(uintptr_t) timeout;
4823 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4824 g2h(uaddr2),
4825 (base_op == FUTEX_CMP_REQUEUE
4826 ? tswap32(val3)
4827 : val3)));
4828 default:
4829 return -TARGET_ENOSYS;
4830 }
4831 }
4832 #endif
4833
4834 /* Map host to target signal numbers for the wait family of syscalls.
4835 Assume all other status bits are the same. */
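/* A sketch, assuming the usual Linux wait-status encoding: a process killed
   by signal 11 reports status 0x000b (plus the core-dump bit), while a
   stopped process reports (signal << 8) | 0x7f.  Only the signal number
   needs remapping; all other bits are passed through unchanged. */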
4836 static int host_to_target_waitstatus(int status)
4837 {
4838 if (WIFSIGNALED(status)) {
4839 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4840 }
4841 if (WIFSTOPPED(status)) {
4842 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4843 | (status & 0xff);
4844 }
4845 return status;
4846 }
4847
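/* Parse the (possibly overridden) kernel release string "a.b.c" into a
   KERNEL_VERSION-style value (a << 16) + (b << 8) + c, e.g. "3.2.1"
   -> 0x030201.  Non-numeric suffixes such as "-rc1" are ignored. */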
4848 int get_osversion(void)
4849 {
4850 static int osversion;
4851 struct new_utsname buf;
4852 const char *s;
4853 int i, n, tmp;
4854 if (osversion)
4855 return osversion;
4856 if (qemu_uname_release && *qemu_uname_release) {
4857 s = qemu_uname_release;
4858 } else {
4859 if (sys_uname(&buf))
4860 return 0;
4861 s = buf.release;
4862 }
4863 tmp = 0;
4864 for (i = 0; i < 3; i++) {
4865 n = 0;
4866 while (*s >= '0' && *s <= '9') {
4867 n *= 10;
4868 n += *s - '0';
4869 s++;
4870 }
4871 tmp = (tmp << 8) + n;
4872 if (*s == '.')
4873 s++;
4874 }
4875 osversion = tmp;
4876 return osversion;
4877 }
4878
4879
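/* Fake /proc/self/maps: read the host's maps, keep only regions that are
   visible in the guest address space (h2g_valid), and rewrite the addresses
   with h2g() so the guest sees guest virtual addresses.  The host's [stack]
   line is dropped and a synthetic one describing the guest stack is
   appended where we know its bounds. */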
4880 static int open_self_maps(void *cpu_env, int fd)
4881 {
4882 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4883 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4884 #endif
4885 FILE *fp;
4886 char *line = NULL;
4887 size_t len = 0;
4888 ssize_t read;
4889
4890 fp = fopen("/proc/self/maps", "r");
4891 if (fp == NULL) {
4892 return -EACCES;
4893 }
4894
4895 while ((read = getline(&line, &len, fp)) != -1) {
4896 int fields, dev_maj, dev_min, inode;
4897 uint64_t min, max, offset;
4898 char flag_r, flag_w, flag_x, flag_p;
4899 char path[512] = "";
4900 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4901 " %511s", &min, &max, &flag_r, &flag_w, &flag_x,
4902 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4903
4904 if ((fields < 10) || (fields > 11)) {
4905 continue;
4906 }
4907 if (!strncmp(path, "[stack]", 7)) {
4908 continue;
4909 }
4910 if (h2g_valid(min) && h2g_valid(max)) {
4911 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
4912 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
4913 h2g(min), h2g(max), flag_r, flag_w,
4914 flag_x, flag_p, offset, dev_maj, dev_min, inode,
4915 path[0] ? " " : "", path);
4916 }
4917 }
4918
4919 free(line);
4920 fclose(fp);
4921
4922 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4923 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4924 (unsigned long long)ts->info->stack_limit,
4925 (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1))
4926 & TARGET_PAGE_MASK,
4927 (unsigned long long)0);
4928 #endif
4929
4930 return 0;
4931 }
4932
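/* Fake /proc/self/stat: only the fields the guest is likely to care about
   are filled in (pid, command name, and the stack start in field 28); the
   remaining fields are reported as 0. */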
4933 static int open_self_stat(void *cpu_env, int fd)
4934 {
4935 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4936 abi_ulong start_stack = ts->info->start_stack;
4937 int i;
4938
4939 for (i = 0; i < 44; i++) {
4940 char buf[128];
4941 int len;
4942 uint64_t val = 0;
4943
4944 if (i == 0) {
4945 /* pid */
4946 val = getpid();
4947 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4948 } else if (i == 1) {
4949 /* app name */
4950 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
4951 } else if (i == 27) {
4952 /* stack bottom */
4953 val = start_stack;
4954 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4955 } else {
4956 /* for the rest, there is MasterCard */
4957 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
4958 }
4959
4960 len = strlen(buf);
4961 if (write(fd, buf, len) != len) {
4962 return -1;
4963 }
4964 }
4965
4966 return 0;
4967 }
4968
4969 static int open_self_auxv(void *cpu_env, int fd)
4970 {
4971 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4972 abi_ulong auxv = ts->info->saved_auxv;
4973 abi_ulong len = ts->info->auxv_len;
4974 char *ptr;
4975
4976 /*
4977 * Auxiliary vector is stored in target process stack.
4978 * Read in the whole auxv vector and copy it to the file.
4979 */
4980 ptr = lock_user(VERIFY_READ, auxv, len, 0);
4981 if (ptr != NULL) {
4982 while (len > 0) {
4983 ssize_t r;
4984 r = write(fd, ptr, len);
4985 if (r <= 0) {
4986 break;
4987 }
4988 len -= r;
4989 ptr += r;
4990 }
4991 lseek(fd, 0, SEEK_SET);
4992 unlock_user(ptr, auxv, len);
4993 }
4994
4995 return 0;
4996 }
4997
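/* open() interception for a few /proc/self files whose real contents would
   describe the host rather than the guest.  Each entry in fakes[] pairs a
   pathname prefix with a fill() callback that writes the emulated contents
   into an unlinked temporary file, whose descriptor is then handed to the
   guest.  A new file could be faked by adding, for example, a hypothetical
   { "/proc/self/foo", open_self_foo } entry before the terminating
   { NULL, NULL }. */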
4998 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
4999 {
5000 struct fake_open {
5001 const char *filename;
5002 int (*fill)(void *cpu_env, int fd);
5003 };
5004 const struct fake_open *fake_open;
5005 static const struct fake_open fakes[] = {
5006 { "/proc/self/maps", open_self_maps },
5007 { "/proc/self/stat", open_self_stat },
5008 { "/proc/self/auxv", open_self_auxv },
5009 { NULL, NULL }
5010 };
5011
5012 for (fake_open = fakes; fake_open->filename; fake_open++) {
5013 if (!strncmp(pathname, fake_open->filename,
5014 strlen(fake_open->filename))) {
5015 break;
5016 }
5017 }
5018
5019 if (fake_open->filename) {
5020 const char *tmpdir;
5021 char filename[PATH_MAX];
5022 int fd, r;
5023
5024 /* create a temporary file to hold the emulated contents */
5025 tmpdir = getenv("TMPDIR");
5026 if (!tmpdir)
5027 tmpdir = "/tmp";
5028 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5029 fd = mkstemp(filename);
5030 if (fd < 0) {
5031 return fd;
5032 }
5033 unlink(filename);
5034
5035 if ((r = fake_open->fill(cpu_env, fd))) {
5036 close(fd);
5037 return r;
5038 }
5039 lseek(fd, 0, SEEK_SET);
5040
5041 return fd;
5042 }
5043
5044 return get_errno(open(path(pathname), flags, mode));
5045 }
5046
5047 /* do_syscall() should always have a single exit point at the end so
5048 that actions, such as logging of syscall results, can be performed.
5049 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5050 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5051 abi_long arg2, abi_long arg3, abi_long arg4,
5052 abi_long arg5, abi_long arg6, abi_long arg7,
5053 abi_long arg8)
5054 {
5055 abi_long ret;
5056 struct stat st;
5057 struct statfs stfs;
5058 void *p;
5059
5060 #ifdef DEBUG
5061 gemu_log("syscall %d", num);
5062 #endif
5063 if(do_strace)
5064 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5065
5066 switch(num) {
5067 case TARGET_NR_exit:
5068 #ifdef CONFIG_USE_NPTL
5069 /* In old applications this may be used to implement _exit(2).
5070 However, in threaded applications it is used for thread termination,
5071 and _exit_group is used for application termination.
5072 Do thread termination if we have more than one thread. */
5073 /* FIXME: This probably breaks if a signal arrives. We should probably
5074 be disabling signals. */
5075 if (first_cpu->next_cpu) {
5076 TaskState *ts;
5077 CPUArchState **lastp;
5078 CPUArchState *p;
5079
5080 cpu_list_lock();
5081 lastp = &first_cpu;
5082 p = first_cpu;
5083 while (p && p != (CPUArchState *)cpu_env) {
5084 lastp = &p->next_cpu;
5085 p = p->next_cpu;
5086 }
5087 /* If we didn't find the CPU for this thread then something is
5088 horribly wrong. */
5089 if (!p)
5090 abort();
5091 /* Remove the CPU from the list. */
5092 *lastp = p->next_cpu;
5093 cpu_list_unlock();
5094 ts = ((CPUArchState *)cpu_env)->opaque;
5095 if (ts->child_tidptr) {
5096 put_user_u32(0, ts->child_tidptr);
5097 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5098 NULL, NULL, 0);
5099 }
5100 thread_env = NULL;
5101 object_delete(OBJECT(ENV_GET_CPU(cpu_env)));
5102 g_free(ts);
5103 pthread_exit(NULL);
5104 }
5105 #endif
5106 #ifdef TARGET_GPROF
5107 _mcleanup();
5108 #endif
5109 gdb_exit(cpu_env, arg1);
5110 _exit(arg1);
5111 ret = 0; /* avoid warning */
5112 break;
5113 case TARGET_NR_read:
5114 if (arg3 == 0)
5115 ret = 0;
5116 else {
5117 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5118 goto efault;
5119 ret = get_errno(read(arg1, p, arg3));
5120 unlock_user(p, arg2, ret);
5121 }
5122 break;
5123 case TARGET_NR_write:
5124 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5125 goto efault;
5126 ret = get_errno(write(arg1, p, arg3));
5127 unlock_user(p, arg2, 0);
5128 break;
5129 case TARGET_NR_open:
5130 if (!(p = lock_user_string(arg1)))
5131 goto efault;
5132 ret = get_errno(do_open(cpu_env, p,
5133 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5134 arg3));
5135 unlock_user(p, arg1, 0);
5136 break;
5137 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5138 case TARGET_NR_openat:
5139 if (!(p = lock_user_string(arg2)))
5140 goto efault;
5141 ret = get_errno(sys_openat(arg1,
5142 path(p),
5143 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5144 arg4));
5145 unlock_user(p, arg2, 0);
5146 break;
5147 #endif
5148 case TARGET_NR_close:
5149 ret = get_errno(close(arg1));
5150 break;
5151 case TARGET_NR_brk:
5152 ret = do_brk(arg1);
5153 break;
5154 case TARGET_NR_fork:
5155 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5156 break;
5157 #ifdef TARGET_NR_waitpid
5158 case TARGET_NR_waitpid:
5159 {
5160 int status;
5161 ret = get_errno(waitpid(arg1, &status, arg3));
5162 if (!is_error(ret) && arg2 && ret
5163 && put_user_s32(host_to_target_waitstatus(status), arg2))
5164 goto efault;
5165 }
5166 break;
5167 #endif
5168 #ifdef TARGET_NR_waitid
5169 case TARGET_NR_waitid:
5170 {
5171 siginfo_t info;
5172 info.si_pid = 0;
5173 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5174 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5175 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5176 goto efault;
5177 host_to_target_siginfo(p, &info);
5178 unlock_user(p, arg3, sizeof(target_siginfo_t));
5179 }
5180 }
5181 break;
5182 #endif
5183 #ifdef TARGET_NR_creat /* not on alpha */
5184 case TARGET_NR_creat:
5185 if (!(p = lock_user_string(arg1)))
5186 goto efault;
5187 ret = get_errno(creat(p, arg2));
5188 unlock_user(p, arg1, 0);
5189 break;
5190 #endif
5191 case TARGET_NR_link:
5192 {
5193 void * p2;
5194 p = lock_user_string(arg1);
5195 p2 = lock_user_string(arg2);
5196 if (!p || !p2)
5197 ret = -TARGET_EFAULT;
5198 else
5199 ret = get_errno(link(p, p2));
5200 unlock_user(p2, arg2, 0);
5201 unlock_user(p, arg1, 0);
5202 }
5203 break;
5204 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5205 case TARGET_NR_linkat:
5206 {
5207 void * p2 = NULL;
5208 if (!arg2 || !arg4)
5209 goto efault;
5210 p = lock_user_string(arg2);
5211 p2 = lock_user_string(arg4);
5212 if (!p || !p2)
5213 ret = -TARGET_EFAULT;
5214 else
5215 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
5216 unlock_user(p, arg2, 0);
5217 unlock_user(p2, arg4, 0);
5218 }
5219 break;
5220 #endif
5221 case TARGET_NR_unlink:
5222 if (!(p = lock_user_string(arg1)))
5223 goto efault;
5224 ret = get_errno(unlink(p));
5225 unlock_user(p, arg1, 0);
5226 break;
5227 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5228 case TARGET_NR_unlinkat:
5229 if (!(p = lock_user_string(arg2)))
5230 goto efault;
5231 ret = get_errno(sys_unlinkat(arg1, p, arg3));
5232 unlock_user(p, arg2, 0);
5233 break;
5234 #endif
5235 case TARGET_NR_execve:
5236 {
5237 char **argp, **envp;
5238 int argc, envc;
5239 abi_ulong gp;
5240 abi_ulong guest_argp;
5241 abi_ulong guest_envp;
5242 abi_ulong addr;
5243 char **q;
5244 int total_size = 0;
5245
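/* Copy the guest's argv[] and envp[] pointer arrays into host arrays:
   first count the entries, then lock each guest string in place.  The
   combined size is checked against MAX_ARG_PAGES because the host kernel's
   E2BIG limit is based on the host page size, which may be larger than the
   target's. */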
5246 argc = 0;
5247 guest_argp = arg2;
5248 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5249 if (get_user_ual(addr, gp))
5250 goto efault;
5251 if (!addr)
5252 break;
5253 argc++;
5254 }
5255 envc = 0;
5256 guest_envp = arg3;
5257 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5258 if (get_user_ual(addr, gp))
5259 goto efault;
5260 if (!addr)
5261 break;
5262 envc++;
5263 }
5264
5265 argp = alloca((argc + 1) * sizeof(void *));
5266 envp = alloca((envc + 1) * sizeof(void *));
5267
5268 for (gp = guest_argp, q = argp; gp;
5269 gp += sizeof(abi_ulong), q++) {
5270 if (get_user_ual(addr, gp))
5271 goto execve_efault;
5272 if (!addr)
5273 break;
5274 if (!(*q = lock_user_string(addr)))
5275 goto execve_efault;
5276 total_size += strlen(*q) + 1;
5277 }
5278 *q = NULL;
5279
5280 for (gp = guest_envp, q = envp; gp;
5281 gp += sizeof(abi_ulong), q++) {
5282 if (get_user_ual(addr, gp))
5283 goto execve_efault;
5284 if (!addr)
5285 break;
5286 if (!(*q = lock_user_string(addr)))
5287 goto execve_efault;
5288 total_size += strlen(*q) + 1;
5289 }
5290 *q = NULL;
5291
5292 /* This case will not be caught by the host's execve() if its
5293 page size is bigger than the target's. */
5294 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5295 ret = -TARGET_E2BIG;
5296 goto execve_end;
5297 }
5298 if (!(p = lock_user_string(arg1)))
5299 goto execve_efault;
5300 ret = get_errno(execve(p, argp, envp));
5301 unlock_user(p, arg1, 0);
5302
5303 goto execve_end;
5304
5305 execve_efault:
5306 ret = -TARGET_EFAULT;
5307
5308 execve_end:
5309 for (gp = guest_argp, q = argp; *q;
5310 gp += sizeof(abi_ulong), q++) {
5311 if (get_user_ual(addr, gp)
5312 || !addr)
5313 break;
5314 unlock_user(*q, addr, 0);
5315 }
5316 for (gp = guest_envp, q = envp; *q;
5317 gp += sizeof(abi_ulong), q++) {
5318 if (get_user_ual(addr, gp)
5319 || !addr)
5320 break;
5321 unlock_user(*q, addr, 0);
5322 }
5323 }
5324 break;
5325 case TARGET_NR_chdir:
5326 if (!(p = lock_user_string(arg1)))
5327 goto efault;
5328 ret = get_errno(chdir(p));
5329 unlock_user(p, arg1, 0);
5330 break;
5331 #ifdef TARGET_NR_time
5332 case TARGET_NR_time:
5333 {
5334 time_t host_time;
5335 ret = get_errno(time(&host_time));
5336 if (!is_error(ret)
5337 && arg1
5338 && put_user_sal(host_time, arg1))
5339 goto efault;
5340 }
5341 break;
5342 #endif
5343 case TARGET_NR_mknod:
5344 if (!(p = lock_user_string(arg1)))
5345 goto efault;
5346 ret = get_errno(mknod(p, arg2, arg3));
5347 unlock_user(p, arg1, 0);
5348 break;
5349 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5350 case TARGET_NR_mknodat:
5351 if (!(p = lock_user_string(arg2)))
5352 goto efault;
5353 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5354 unlock_user(p, arg2, 0);
5355 break;
5356 #endif
5357 case TARGET_NR_chmod:
5358 if (!(p = lock_user_string(arg1)))
5359 goto efault;
5360 ret = get_errno(chmod(p, arg2));
5361 unlock_user(p, arg1, 0);
5362 break;
5363 #ifdef TARGET_NR_break
5364 case TARGET_NR_break:
5365 goto unimplemented;
5366 #endif
5367 #ifdef TARGET_NR_oldstat
5368 case TARGET_NR_oldstat:
5369 goto unimplemented;
5370 #endif
5371 case TARGET_NR_lseek:
5372 ret = get_errno(lseek(arg1, arg2, arg3));
5373 break;
5374 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5375 /* Alpha specific */
5376 case TARGET_NR_getxpid:
5377 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5378 ret = get_errno(getpid());
5379 break;
5380 #endif
5381 #ifdef TARGET_NR_getpid
5382 case TARGET_NR_getpid:
5383 ret = get_errno(getpid());
5384 break;
5385 #endif
5386 case TARGET_NR_mount:
5387 {
5388 /* need to look at the data field */
5389 void *p2, *p3;
5390 p = lock_user_string(arg1);
5391 p2 = lock_user_string(arg2);
5392 p3 = lock_user_string(arg3);
5393 if (!p || !p2 || !p3)
5394 ret = -TARGET_EFAULT;
5395 else {
5396 /* FIXME - arg5 should be locked, but it isn't clear how to
5397 * do that since it's not guaranteed to be a NULL-terminated
5398 * string.
5399 */
5400 if ( ! arg5 )
5401 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5402 else
5403 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5404 }
5405 unlock_user(p, arg1, 0);
5406 unlock_user(p2, arg2, 0);
5407 unlock_user(p3, arg3, 0);
5408 break;
5409 }
5410 #ifdef TARGET_NR_umount
5411 case TARGET_NR_umount:
5412 if (!(p = lock_user_string(arg1)))
5413 goto efault;
5414 ret = get_errno(umount(p));
5415 unlock_user(p, arg1, 0);
5416 break;
5417 #endif
5418 #ifdef TARGET_NR_stime /* not on alpha */
5419 case TARGET_NR_stime:
5420 {
5421 time_t host_time;
5422 if (get_user_sal(host_time, arg1))
5423 goto efault;
5424 ret = get_errno(stime(&host_time));
5425 }
5426 break;
5427 #endif
5428 case TARGET_NR_ptrace:
5429 goto unimplemented;
5430 #ifdef TARGET_NR_alarm /* not on alpha */
5431 case TARGET_NR_alarm:
5432 ret = alarm(arg1);
5433 break;
5434 #endif
5435 #ifdef TARGET_NR_oldfstat
5436 case TARGET_NR_oldfstat:
5437 goto unimplemented;
5438 #endif
5439 #ifdef TARGET_NR_pause /* not on alpha */
5440 case TARGET_NR_pause:
5441 ret = get_errno(pause());
5442 break;
5443 #endif
5444 #ifdef TARGET_NR_utime
5445 case TARGET_NR_utime:
5446 {
5447 struct utimbuf tbuf, *host_tbuf;
5448 struct target_utimbuf *target_tbuf;
5449 if (arg2) {
5450 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5451 goto efault;
5452 tbuf.actime = tswapal(target_tbuf->actime);
5453 tbuf.modtime = tswapal(target_tbuf->modtime);
5454 unlock_user_struct(target_tbuf, arg2, 0);
5455 host_tbuf = &tbuf;
5456 } else {
5457 host_tbuf = NULL;
5458 }
5459 if (!(p = lock_user_string(arg1)))
5460 goto efault;
5461 ret = get_errno(utime(p, host_tbuf));
5462 unlock_user(p, arg1, 0);
5463 }
5464 break;
5465 #endif
5466 case TARGET_NR_utimes:
5467 {
5468 struct timeval *tvp, tv[2];
5469 if (arg2) {
5470 if (copy_from_user_timeval(&tv[0], arg2)
5471 || copy_from_user_timeval(&tv[1],
5472 arg2 + sizeof(struct target_timeval)))
5473 goto efault;
5474 tvp = tv;
5475 } else {
5476 tvp = NULL;
5477 }
5478 if (!(p = lock_user_string(arg1)))
5479 goto efault;
5480 ret = get_errno(utimes(p, tvp));
5481 unlock_user(p, arg1, 0);
5482 }
5483 break;
5484 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5485 case TARGET_NR_futimesat:
5486 {
5487 struct timeval *tvp, tv[2];
5488 if (arg3) {
5489 if (copy_from_user_timeval(&tv[0], arg3)
5490 || copy_from_user_timeval(&tv[1],
5491 arg3 + sizeof(struct target_timeval)))
5492 goto efault;
5493 tvp = tv;
5494 } else {
5495 tvp = NULL;
5496 }
5497 if (!(p = lock_user_string(arg2)))
5498 goto efault;
5499 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5500 unlock_user(p, arg2, 0);
5501 }
5502 break;
5503 #endif
5504 #ifdef TARGET_NR_stty
5505 case TARGET_NR_stty:
5506 goto unimplemented;
5507 #endif
5508 #ifdef TARGET_NR_gtty
5509 case TARGET_NR_gtty:
5510 goto unimplemented;
5511 #endif
5512 case TARGET_NR_access:
5513 if (!(p = lock_user_string(arg1)))
5514 goto efault;
5515 ret = get_errno(access(path(p), arg2));
5516 unlock_user(p, arg1, 0);
5517 break;
5518 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5519 case TARGET_NR_faccessat:
5520 if (!(p = lock_user_string(arg2)))
5521 goto efault;
5522 ret = get_errno(sys_faccessat(arg1, p, arg3));
5523 unlock_user(p, arg2, 0);
5524 break;
5525 #endif
5526 #ifdef TARGET_NR_nice /* not on alpha */
5527 case TARGET_NR_nice:
5528 ret = get_errno(nice(arg1));
5529 break;
5530 #endif
5531 #ifdef TARGET_NR_ftime
5532 case TARGET_NR_ftime:
5533 goto unimplemented;
5534 #endif
5535 case TARGET_NR_sync:
5536 sync();
5537 ret = 0;
5538 break;
5539 case TARGET_NR_kill:
5540 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5541 break;
5542 case TARGET_NR_rename:
5543 {
5544 void *p2;
5545 p = lock_user_string(arg1);
5546 p2 = lock_user_string(arg2);
5547 if (!p || !p2)
5548 ret = -TARGET_EFAULT;
5549 else
5550 ret = get_errno(rename(p, p2));
5551 unlock_user(p2, arg2, 0);
5552 unlock_user(p, arg1, 0);
5553 }
5554 break;
5555 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5556 case TARGET_NR_renameat:
5557 {
5558 void *p2;
5559 p = lock_user_string(arg2);
5560 p2 = lock_user_string(arg4);
5561 if (!p || !p2)
5562 ret = -TARGET_EFAULT;
5563 else
5564 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5565 unlock_user(p2, arg4, 0);
5566 unlock_user(p, arg2, 0);
5567 }
5568 break;
5569 #endif
5570 case TARGET_NR_mkdir:
5571 if (!(p = lock_user_string(arg1)))
5572 goto efault;
5573 ret = get_errno(mkdir(p, arg2));
5574 unlock_user(p, arg1, 0);
5575 break;
5576 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5577 case TARGET_NR_mkdirat:
5578 if (!(p = lock_user_string(arg2)))
5579 goto efault;
5580 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5581 unlock_user(p, arg2, 0);
5582 break;
5583 #endif
5584 case TARGET_NR_rmdir:
5585 if (!(p = lock_user_string(arg1)))
5586 goto efault;
5587 ret = get_errno(rmdir(p));
5588 unlock_user(p, arg1, 0);
5589 break;
5590 case TARGET_NR_dup:
5591 ret = get_errno(dup(arg1));
5592 break;
5593 case TARGET_NR_pipe:
5594 ret = do_pipe(cpu_env, arg1, 0, 0);
5595 break;
5596 #ifdef TARGET_NR_pipe2
5597 case TARGET_NR_pipe2:
5598 ret = do_pipe(cpu_env, arg1,
5599 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5600 break;
5601 #endif
5602 case TARGET_NR_times:
5603 {
5604 struct target_tms *tmsp;
5605 struct tms tms;
5606 ret = get_errno(times(&tms));
5607 if (arg1) {
5608 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5609 if (!tmsp)
5610 goto efault;
5611 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5612 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5613 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5614 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5615 }
5616 if (!is_error(ret))
5617 ret = host_to_target_clock_t(ret);
5618 }
5619 break;
5620 #ifdef TARGET_NR_prof
5621 case TARGET_NR_prof:
5622 goto unimplemented;
5623 #endif
5624 #ifdef TARGET_NR_signal
5625 case TARGET_NR_signal:
5626 goto unimplemented;
5627 #endif
5628 case TARGET_NR_acct:
5629 if (arg1 == 0) {
5630 ret = get_errno(acct(NULL));
5631 } else {
5632 if (!(p = lock_user_string(arg1)))
5633 goto efault;
5634 ret = get_errno(acct(path(p)));
5635 unlock_user(p, arg1, 0);
5636 }
5637 break;
5638 #ifdef TARGET_NR_umount2 /* not on alpha */
5639 case TARGET_NR_umount2:
5640 if (!(p = lock_user_string(arg1)))
5641 goto efault;
5642 ret = get_errno(umount2(p, arg2));
5643 unlock_user(p, arg1, 0);
5644 break;
5645 #endif
5646 #ifdef TARGET_NR_lock
5647 case TARGET_NR_lock:
5648 goto unimplemented;
5649 #endif
5650 case TARGET_NR_ioctl:
5651 ret = do_ioctl(arg1, arg2, arg3);
5652 break;
5653 case TARGET_NR_fcntl:
5654 ret = do_fcntl(arg1, arg2, arg3);
5655 break;
5656 #ifdef TARGET_NR_mpx
5657 case TARGET_NR_mpx:
5658 goto unimplemented;
5659 #endif
5660 case TARGET_NR_setpgid:
5661 ret = get_errno(setpgid(arg1, arg2));
5662 break;
5663 #ifdef TARGET_NR_ulimit
5664 case TARGET_NR_ulimit:
5665 goto unimplemented;
5666 #endif
5667 #ifdef TARGET_NR_oldolduname
5668 case TARGET_NR_oldolduname:
5669 goto unimplemented;
5670 #endif
5671 case TARGET_NR_umask:
5672 ret = get_errno(umask(arg1));
5673 break;
5674 case TARGET_NR_chroot:
5675 if (!(p = lock_user_string(arg1)))
5676 goto efault;
5677 ret = get_errno(chroot(p));
5678 unlock_user(p, arg1, 0);
5679 break;
5680 case TARGET_NR_ustat:
5681 goto unimplemented;
5682 case TARGET_NR_dup2:
5683 ret = get_errno(dup2(arg1, arg2));
5684 break;
5685 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5686 case TARGET_NR_dup3:
5687 ret = get_errno(dup3(arg1, arg2, arg3));
5688 break;
5689 #endif
5690 #ifdef TARGET_NR_getppid /* not on alpha */
5691 case TARGET_NR_getppid:
5692 ret = get_errno(getppid());
5693 break;
5694 #endif
5695 case TARGET_NR_getpgrp:
5696 ret = get_errno(getpgrp());
5697 break;
5698 case TARGET_NR_setsid:
5699 ret = get_errno(setsid());
5700 break;
5701 #ifdef TARGET_NR_sigaction
5702 case TARGET_NR_sigaction:
5703 {
5704 #if defined(TARGET_ALPHA)
5705 struct target_sigaction act, oact, *pact = 0;
5706 struct target_old_sigaction *old_act;
5707 if (arg2) {
5708 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5709 goto efault;
5710 act._sa_handler = old_act->_sa_handler;
5711 target_siginitset(&act.sa_mask, old_act->sa_mask);
5712 act.sa_flags = old_act->sa_flags;
5713 act.sa_restorer = 0;
5714 unlock_user_struct(old_act, arg2, 0);
5715 pact = &act;
5716 }
5717 ret = get_errno(do_sigaction(arg1, pact, &oact));
5718 if (!is_error(ret) && arg3) {
5719 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5720 goto efault;
5721 old_act->_sa_handler = oact._sa_handler;
5722 old_act->sa_mask = oact.sa_mask.sig[0];
5723 old_act->sa_flags = oact.sa_flags;
5724 unlock_user_struct(old_act, arg3, 1);
5725 }
5726 #elif defined(TARGET_MIPS)
5727 struct target_sigaction act, oact, *pact, *old_act;
5728
5729 if (arg2) {
5730 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5731 goto efault;
5732 act._sa_handler = old_act->_sa_handler;
5733 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5734 act.sa_flags = old_act->sa_flags;
5735 unlock_user_struct(old_act, arg2, 0);
5736 pact = &act;
5737 } else {
5738 pact = NULL;
5739 }
5740
5741 ret = get_errno(do_sigaction(arg1, pact, &oact));
5742
5743 if (!is_error(ret) && arg3) {
5744 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5745 goto efault;
5746 old_act->_sa_handler = oact._sa_handler;
5747 old_act->sa_flags = oact.sa_flags;
5748 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5749 old_act->sa_mask.sig[1] = 0;
5750 old_act->sa_mask.sig[2] = 0;
5751 old_act->sa_mask.sig[3] = 0;
5752 unlock_user_struct(old_act, arg3, 1);
5753 }
5754 #else
5755 struct target_old_sigaction *old_act;
5756 struct target_sigaction act, oact, *pact;
5757 if (arg2) {
5758 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5759 goto efault;
5760 act._sa_handler = old_act->_sa_handler;
5761 target_siginitset(&act.sa_mask, old_act->sa_mask);
5762 act.sa_flags = old_act->sa_flags;
5763 act.sa_restorer = old_act->sa_restorer;
5764 unlock_user_struct(old_act, arg2, 0);
5765 pact = &act;
5766 } else {
5767 pact = NULL;
5768 }
5769 ret = get_errno(do_sigaction(arg1, pact, &oact));
5770 if (!is_error(ret) && arg3) {
5771 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5772 goto efault;
5773 old_act->_sa_handler = oact._sa_handler;
5774 old_act->sa_mask = oact.sa_mask.sig[0];
5775 old_act->sa_flags = oact.sa_flags;
5776 old_act->sa_restorer = oact.sa_restorer;
5777 unlock_user_struct(old_act, arg3, 1);
5778 }
5779 #endif
5780 }
5781 break;
5782 #endif
5783 case TARGET_NR_rt_sigaction:
5784 {
5785 #if defined(TARGET_ALPHA)
5786 struct target_sigaction act, oact, *pact = 0;
5787 struct target_rt_sigaction *rt_act;
5788 /* ??? arg4 == sizeof(sigset_t). */
5789 if (arg2) {
5790 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5791 goto efault;
5792 act._sa_handler = rt_act->_sa_handler;
5793 act.sa_mask = rt_act->sa_mask;
5794 act.sa_flags = rt_act->sa_flags;
5795 act.sa_restorer = arg5;
5796 unlock_user_struct(rt_act, arg2, 0);
5797 pact = &act;
5798 }
5799 ret = get_errno(do_sigaction(arg1, pact, &oact));
5800 if (!is_error(ret) && arg3) {
5801 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5802 goto efault;
5803 rt_act->_sa_handler = oact._sa_handler;
5804 rt_act->sa_mask = oact.sa_mask;
5805 rt_act->sa_flags = oact.sa_flags;
5806 unlock_user_struct(rt_act, arg3, 1);
5807 }
5808 #else
5809 struct target_sigaction *act;
5810 struct target_sigaction *oact;
5811
5812 if (arg2) {
5813 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5814 goto efault;
5815 } else
5816 act = NULL;
5817 if (arg3) {
5818 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5819 ret = -TARGET_EFAULT;
5820 goto rt_sigaction_fail;
5821 }
5822 } else
5823 oact = NULL;
5824 ret = get_errno(do_sigaction(arg1, act, oact));
5825 rt_sigaction_fail:
5826 if (act)
5827 unlock_user_struct(act, arg2, 0);
5828 if (oact)
5829 unlock_user_struct(oact, arg3, 1);
5830 #endif
5831 }
5832 break;
5833 #ifdef TARGET_NR_sgetmask /* not on alpha */
5834 case TARGET_NR_sgetmask:
5835 {
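/* Read the current signal mask without modifying it and return it in the old-style single-word target format. */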
5836 sigset_t cur_set;
5837 abi_ulong target_set;
5838 sigprocmask(0, NULL, &cur_set);
5839 host_to_target_old_sigset(&target_set, &cur_set);
5840 ret = target_set;
5841 }
5842 break;
5843 #endif
5844 #ifdef TARGET_NR_ssetmask /* not on alpha */
5845 case TARGET_NR_ssetmask:
5846 {
5847 sigset_t set, oset, cur_set;
5848 abi_ulong target_set = arg1;
5849 sigprocmask(0, NULL, &cur_set);
5850 target_to_host_old_sigset(&set, &target_set);
5851 sigorset(&set, &set, &cur_set);
5852 sigprocmask(SIG_SETMASK, &set, &oset);
5853 host_to_target_old_sigset(&target_set, &oset);
5854 ret = target_set;
5855 }
5856 break;
5857 #endif
5858 #ifdef TARGET_NR_sigprocmask
5859 case TARGET_NR_sigprocmask:
5860 {
5861 #if defined(TARGET_ALPHA)
5862 sigset_t set, oldset;
5863 abi_ulong mask;
5864 int how;
5865
5866 switch (arg1) {
5867 case TARGET_SIG_BLOCK:
5868 how = SIG_BLOCK;
5869 break;
5870 case TARGET_SIG_UNBLOCK:
5871 how = SIG_UNBLOCK;
5872 break;
5873 case TARGET_SIG_SETMASK:
5874 how = SIG_SETMASK;
5875 break;
5876 default:
5877 ret = -TARGET_EINVAL;
5878 goto fail;
5879 }
5880 mask = arg2;
5881 target_to_host_old_sigset(&set, &mask);
5882
5883 ret = get_errno(sigprocmask(how, &set, &oldset));
5884 if (!is_error(ret)) {
5885 host_to_target_old_sigset(&mask, &oldset);
5886 ret = mask;
5887 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5888 }
5889 #else
5890 sigset_t set, oldset, *set_ptr;
5891 int how;
5892
5893 if (arg2) {
5894 switch (arg1) {
5895 case TARGET_SIG_BLOCK:
5896 how = SIG_BLOCK;
5897 break;
5898 case TARGET_SIG_UNBLOCK:
5899 how = SIG_UNBLOCK;
5900 break;
5901 case TARGET_SIG_SETMASK:
5902 how = SIG_SETMASK;
5903 break;
5904 default:
5905 ret = -TARGET_EINVAL;
5906 goto fail;
5907 }
5908 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5909 goto efault;
5910 target_to_host_old_sigset(&set, p);
5911 unlock_user(p, arg2, 0);
5912 set_ptr = &set;
5913 } else {
5914 how = 0;
5915 set_ptr = NULL;
5916 }
5917 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5918 if (!is_error(ret) && arg3) {
5919 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5920 goto efault;
5921 host_to_target_old_sigset(p, &oldset);
5922 unlock_user(p, arg3, sizeof(target_sigset_t));
5923 }
5924 #endif
5925 }
5926 break;
5927 #endif
5928 case TARGET_NR_rt_sigprocmask:
5929 {
5930 int how = arg1;
5931 sigset_t set, oldset, *set_ptr;
5932
5933 if (arg2) {
5934 switch(how) {
5935 case TARGET_SIG_BLOCK:
5936 how = SIG_BLOCK;
5937 break;
5938 case TARGET_SIG_UNBLOCK:
5939 how = SIG_UNBLOCK;
5940 break;
5941 case TARGET_SIG_SETMASK:
5942 how = SIG_SETMASK;
5943 break;
5944 default:
5945 ret = -TARGET_EINVAL;
5946 goto fail;
5947 }
5948 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5949 goto efault;
5950 target_to_host_sigset(&set, p);
5951 unlock_user(p, arg2, 0);
5952 set_ptr = &set;
5953 } else {
5954 how = 0;
5955 set_ptr = NULL;
5956 }
5957 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5958 if (!is_error(ret) && arg3) {
5959 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5960 goto efault;
5961 host_to_target_sigset(p, &oldset);
5962 unlock_user(p, arg3, sizeof(target_sigset_t));
5963 }
5964 }
5965 break;
5966 #ifdef TARGET_NR_sigpending
5967 case TARGET_NR_sigpending:
5968 {
5969 sigset_t set;
5970 ret = get_errno(sigpending(&set));
5971 if (!is_error(ret)) {
5972 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5973 goto efault;
5974 host_to_target_old_sigset(p, &set);
5975 unlock_user(p, arg1, sizeof(target_sigset_t));
5976 }
5977 }
5978 break;
5979 #endif
5980 case TARGET_NR_rt_sigpending:
5981 {
5982 sigset_t set;
5983 ret = get_errno(sigpending(&set));
5984 if (!is_error(ret)) {
5985 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5986 goto efault;
5987 host_to_target_sigset(p, &set);
5988 unlock_user(p, arg1, sizeof(target_sigset_t));
5989 }
5990 }
5991 break;
5992 #ifdef TARGET_NR_sigsuspend
5993 case TARGET_NR_sigsuspend:
5994 {
5995 sigset_t set;
5996 #if defined(TARGET_ALPHA)
5997 abi_ulong mask = arg1;
5998 target_to_host_old_sigset(&set, &mask);
5999 #else
6000 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6001 goto efault;
6002 target_to_host_old_sigset(&set, p);
6003 unlock_user(p, arg1, 0);
6004 #endif
6005 ret = get_errno(sigsuspend(&set));
6006 }
6007 break;
6008 #endif
6009 case TARGET_NR_rt_sigsuspend:
6010 {
6011 sigset_t set;
6012 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6013 goto efault;
6014 target_to_host_sigset(&set, p);
6015 unlock_user(p, arg1, 0);
6016 ret = get_errno(sigsuspend(&set));
6017 }
6018 break;
6019 case TARGET_NR_rt_sigtimedwait:
6020 {
6021 sigset_t set;
6022 struct timespec uts, *puts;
6023 siginfo_t uinfo;
6024
6025 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6026 goto efault;
6027 target_to_host_sigset(&set, p);
6028 unlock_user(p, arg1, 0);
6029 if (arg3) {
6030 puts = &uts;
6031 target_to_host_timespec(puts, arg3);
6032 } else {
6033 puts = NULL;
6034 }
6035 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6036 if (!is_error(ret) && arg2) {
6037 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6038 goto efault;
6039 host_to_target_siginfo(p, &uinfo);
6040 unlock_user(p, arg2, sizeof(target_siginfo_t));
6041 }
6042 }
6043 break;
6044 case TARGET_NR_rt_sigqueueinfo:
6045 {
6046 siginfo_t uinfo;
6047 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6048 goto efault;
6049 target_to_host_siginfo(&uinfo, p);
6050 unlock_user(p, arg3, 0);
6051 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6052 }
6053 break;
6054 #ifdef TARGET_NR_sigreturn
6055 case TARGET_NR_sigreturn:
6056 /* NOTE: ret is eax, so no transcoding needs to be done */
6057 ret = do_sigreturn(cpu_env);
6058 break;
6059 #endif
6060 case TARGET_NR_rt_sigreturn:
6061 /* NOTE: ret is eax, so no transcoding needs to be done */
6062 ret = do_rt_sigreturn(cpu_env);
6063 break;
6064 case TARGET_NR_sethostname:
6065 if (!(p = lock_user_string(arg1)))
6066 goto efault;
6067 ret = get_errno(sethostname(p, arg2));
6068 unlock_user(p, arg1, 0);
6069 break;
6070 case TARGET_NR_setrlimit:
6071 {
6072 int resource = target_to_host_resource(arg1);
6073 struct target_rlimit *target_rlim;
6074 struct rlimit rlim;
6075 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6076 goto efault;
6077 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6078 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6079 unlock_user_struct(target_rlim, arg2, 0);
6080 ret = get_errno(setrlimit(resource, &rlim));
6081 }
6082 break;
6083 case TARGET_NR_getrlimit:
6084 {
6085 int resource = target_to_host_resource(arg1);
6086 struct target_rlimit *target_rlim;
6087 struct rlimit rlim;
6088
6089 ret = get_errno(getrlimit(resource, &rlim));
6090 if (!is_error(ret)) {
6091 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6092 goto efault;
6093 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6094 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6095 unlock_user_struct(target_rlim, arg2, 1);
6096 }
6097 }
6098 break;
6099 case TARGET_NR_getrusage:
6100 {
6101 struct rusage rusage;
6102 ret = get_errno(getrusage(arg1, &rusage));
6103 if (!is_error(ret)) {
6104 host_to_target_rusage(arg2, &rusage);
6105 }
6106 }
6107 break;
6108 case TARGET_NR_gettimeofday:
6109 {
6110 struct timeval tv;
6111 ret = get_errno(gettimeofday(&tv, NULL));
6112 if (!is_error(ret)) {
6113 if (copy_to_user_timeval(arg1, &tv))
6114 goto efault;
6115 }
6116 }
6117 break;
6118 case TARGET_NR_settimeofday:
6119 {
6120 struct timeval tv;
6121 if (copy_from_user_timeval(&tv, arg1))
6122 goto efault;
6123 ret = get_errno(settimeofday(&tv, NULL));
6124 }
6125 break;
6126 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
6127 case TARGET_NR_select:
6128 {
6129 struct target_sel_arg_struct *sel;
6130 abi_ulong inp, outp, exp, tvp;
6131 long nsel;
6132
6133 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6134 goto efault;
6135 nsel = tswapal(sel->n);
6136 inp = tswapal(sel->inp);
6137 outp = tswapal(sel->outp);
6138 exp = tswapal(sel->exp);
6139 tvp = tswapal(sel->tvp);
6140 unlock_user_struct(sel, arg1, 0);
6141 ret = do_select(nsel, inp, outp, exp, tvp);
6142 }
6143 break;
6144 #endif
6145 #ifdef TARGET_NR_pselect6
6146 case TARGET_NR_pselect6:
6147 {
6148 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6149 fd_set rfds, wfds, efds;
6150 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6151 struct timespec ts, *ts_ptr;
6152
6153 /*
6154 * The 6th arg is actually two args smashed together,
6155 * so we cannot use the C library.
6156 */
6157 sigset_t set;
6158 struct {
6159 sigset_t *set;
6160 size_t size;
6161 } sig, *sig_ptr;
6162
6163 abi_ulong arg_sigset, arg_sigsize, *arg7;
6164 target_sigset_t *target_sigset;
6165
6166 n = arg1;
6167 rfd_addr = arg2;
6168 wfd_addr = arg3;
6169 efd_addr = arg4;
6170 ts_addr = arg5;
6171
6172 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6173 if (ret) {
6174 goto fail;
6175 }
6176 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6177 if (ret) {
6178 goto fail;
6179 }
6180 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6181 if (ret) {
6182 goto fail;
6183 }
6184
6185 /*
6186 * This takes a timespec, and not a timeval, so we cannot
6187 * use the do_select() helper ...
6188 */
6189 if (ts_addr) {
6190 if (target_to_host_timespec(&ts, ts_addr)) {
6191 goto efault;
6192 }
6193 ts_ptr = &ts;
6194 } else {
6195 ts_ptr = NULL;
6196 }
6197
6198 /* Extract the two packed args for the sigset */
6199 if (arg6) {
6200 sig_ptr = &sig;
6201 sig.size = _NSIG / 8;
6202
6203 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6204 if (!arg7) {
6205 goto efault;
6206 }
6207 arg_sigset = tswapal(arg7[0]);
6208 arg_sigsize = tswapal(arg7[1]);
6209 unlock_user(arg7, arg6, 0);
6210
6211 if (arg_sigset) {
6212 sig.set = &set;
6213 if (arg_sigsize != sizeof(*target_sigset)) {
6214 /* Like the kernel, we enforce correct size sigsets */
6215 ret = -TARGET_EINVAL;
6216 goto fail;
6217 }
6218 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6219 sizeof(*target_sigset), 1);
6220 if (!target_sigset) {
6221 goto efault;
6222 }
6223 target_to_host_sigset(&set, target_sigset);
6224 unlock_user(target_sigset, arg_sigset, 0);
6225 } else {
6226 sig.set = NULL;
6227 }
6228 } else {
6229 sig_ptr = NULL;
6230 }
6231
6232 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6233 ts_ptr, sig_ptr));
6234
6235 if (!is_error(ret)) {
6236 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6237 goto efault;
6238 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6239 goto efault;
6240 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6241 goto efault;
6242
6243 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6244 goto efault;
6245 }
6246 }
6247 break;
6248 #endif
6249 case TARGET_NR_symlink:
6250 {
6251 void *p2;
6252 p = lock_user_string(arg1);
6253 p2 = lock_user_string(arg2);
6254 if (!p || !p2)
6255 ret = -TARGET_EFAULT;
6256 else
6257 ret = get_errno(symlink(p, p2));
6258 unlock_user(p2, arg2, 0);
6259 unlock_user(p, arg1, 0);
6260 }
6261 break;
6262 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6263 case TARGET_NR_symlinkat:
6264 {
6265 void *p2;
6266 p = lock_user_string(arg1);
6267 p2 = lock_user_string(arg3);
6268 if (!p || !p2)
6269 ret = -TARGET_EFAULT;
6270 else
6271 ret = get_errno(sys_symlinkat(p, arg2, p2));
6272 unlock_user(p2, arg3, 0);
6273 unlock_user(p, arg1, 0);
6274 }
6275 break;
6276 #endif
6277 #ifdef TARGET_NR_oldlstat
6278 case TARGET_NR_oldlstat:
6279 goto unimplemented;
6280 #endif
6281 case TARGET_NR_readlink:
6282 {
6283 void *p2, *temp;
6284 p = lock_user_string(arg1);
6285 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6286 if (!p || !p2)
6287 ret = -TARGET_EFAULT;
6288 else {
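/* Special-case /proc/self/exe: report the path of the binary being emulated (exec_path), not the QEMU binary itself. */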
6289 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
6290 char real[PATH_MAX];
6291 temp = realpath(exec_path, real);
6292 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
6293 snprintf((char *)p2, arg3, "%s", real);
6294 }
6295 else
6296 ret = get_errno(readlink(path(p), p2, arg3));
6297 }
6298 unlock_user(p2, arg2, ret);
6299 unlock_user(p, arg1, 0);
6300 }
6301 break;
6302 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6303 case TARGET_NR_readlinkat:
6304 {
6305 void *p2;
6306 p = lock_user_string(arg2);
6307 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6308 if (!p || !p2)
6309 ret = -TARGET_EFAULT;
6310 else
6311 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6312 unlock_user(p2, arg3, ret);
6313 unlock_user(p, arg2, 0);
6314 }
6315 break;
6316 #endif
6317 #ifdef TARGET_NR_uselib
6318 case TARGET_NR_uselib:
6319 goto unimplemented;
6320 #endif
6321 #ifdef TARGET_NR_swapon
6322 case TARGET_NR_swapon:
6323 if (!(p = lock_user_string(arg1)))
6324 goto efault;
6325 ret = get_errno(swapon(p, arg2));
6326 unlock_user(p, arg1, 0);
6327 break;
6328 #endif
6329 case TARGET_NR_reboot:
6330 if (!(p = lock_user_string(arg4)))
6331 goto efault;
6332 ret = get_errno(reboot(arg1, arg2, arg3, p));
6333 unlock_user(p, arg4, 0);
6334 break;
6335 #ifdef TARGET_NR_readdir
6336 case TARGET_NR_readdir:
6337 goto unimplemented;
6338 #endif
6339 #ifdef TARGET_NR_mmap
6340 case TARGET_NR_mmap:
6341 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6342 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6343 || defined(TARGET_S390X)
6344 {
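/* On these targets mmap passes its six arguments in a block of abi_ulongs pointed to by arg1 rather than in individual registers. */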
6345 abi_ulong *v;
6346 abi_ulong v1, v2, v3, v4, v5, v6;
6347 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6348 goto efault;
6349 v1 = tswapal(v[0]);
6350 v2 = tswapal(v[1]);
6351 v3 = tswapal(v[2]);
6352 v4 = tswapal(v[3]);
6353 v5 = tswapal(v[4]);
6354 v6 = tswapal(v[5]);
6355 unlock_user(v, arg1, 0);
6356 ret = get_errno(target_mmap(v1, v2, v3,
6357 target_to_host_bitmask(v4, mmap_flags_tbl),
6358 v5, v6));
6359 }
6360 #else
6361 ret = get_errno(target_mmap(arg1, arg2, arg3,
6362 target_to_host_bitmask(arg4, mmap_flags_tbl),
6363 arg5,
6364 arg6));
6365 #endif
6366 break;
6367 #endif
6368 #ifdef TARGET_NR_mmap2
6369 case TARGET_NR_mmap2:
6370 #ifndef MMAP_SHIFT
6371 #define MMAP_SHIFT 12
6372 #endif
6373 ret = get_errno(target_mmap(arg1, arg2, arg3,
6374 target_to_host_bitmask(arg4, mmap_flags_tbl),
6375 arg5,
6376 arg6 << MMAP_SHIFT));
6377 break;
6378 #endif
6379 case TARGET_NR_munmap:
6380 ret = get_errno(target_munmap(arg1, arg2));
6381 break;
6382 case TARGET_NR_mprotect:
6383 {
6384 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6385 /* Special hack to detect libc making the stack executable. */
6386 if ((arg3 & PROT_GROWSDOWN)
6387 && arg1 >= ts->info->stack_limit
6388 && arg1 <= ts->info->start_stack) {
6389 arg3 &= ~PROT_GROWSDOWN;
6390 arg2 = arg2 + arg1 - ts->info->stack_limit;
6391 arg1 = ts->info->stack_limit;
6392 }
6393 }
6394 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6395 break;
6396 #ifdef TARGET_NR_mremap
6397 case TARGET_NR_mremap:
6398 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6399 break;
6400 #endif
6401 /* ??? msync/mlock/munlock are broken for softmmu. */
6402 #ifdef TARGET_NR_msync
6403 case TARGET_NR_msync:
6404 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6405 break;
6406 #endif
6407 #ifdef TARGET_NR_mlock
6408 case TARGET_NR_mlock:
6409 ret = get_errno(mlock(g2h(arg1), arg2));
6410 break;
6411 #endif
6412 #ifdef TARGET_NR_munlock
6413 case TARGET_NR_munlock:
6414 ret = get_errno(munlock(g2h(arg1), arg2));
6415 break;
6416 #endif
6417 #ifdef TARGET_NR_mlockall
6418 case TARGET_NR_mlockall:
6419 ret = get_errno(mlockall(arg1));
6420 break;
6421 #endif
6422 #ifdef TARGET_NR_munlockall
6423 case TARGET_NR_munlockall:
6424 ret = get_errno(munlockall());
6425 break;
6426 #endif
6427 case TARGET_NR_truncate:
6428 if (!(p = lock_user_string(arg1)))
6429 goto efault;
6430 ret = get_errno(truncate(p, arg2));
6431 unlock_user(p, arg1, 0);
6432 break;
6433 case TARGET_NR_ftruncate:
6434 ret = get_errno(ftruncate(arg1, arg2));
6435 break;
6436 case TARGET_NR_fchmod:
6437 ret = get_errno(fchmod(arg1, arg2));
6438 break;
6439 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6440 case TARGET_NR_fchmodat:
6441 if (!(p = lock_user_string(arg2)))
6442 goto efault;
6443 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6444 unlock_user(p, arg2, 0);
6445 break;
6446 #endif
6447 case TARGET_NR_getpriority:
6448 /* Note that negative values are valid for getpriority, so we must
6449 differentiate based on errno settings. */
6450 errno = 0;
6451 ret = getpriority(arg1, arg2);
6452 if (ret == -1 && errno != 0) {
6453 ret = -host_to_target_errno(errno);
6454 break;
6455 }
6456 #ifdef TARGET_ALPHA
6457 /* Return value is the unbiased priority. Signal no error. */
6458 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6459 #else
6460 /* Return value is a biased priority to avoid negative numbers. */
6461 ret = 20 - ret;
6462 #endif
6463 break;
6464 case TARGET_NR_setpriority:
6465 ret = get_errno(setpriority(arg1, arg2, arg3));
6466 break;
6467 #ifdef TARGET_NR_profil
6468 case TARGET_NR_profil:
6469 goto unimplemented;
6470 #endif
6471 case TARGET_NR_statfs:
6472 if (!(p = lock_user_string(arg1)))
6473 goto efault;
6474 ret = get_errno(statfs(path(p), &stfs));
6475 unlock_user(p, arg1, 0);
6476 convert_statfs:
6477 if (!is_error(ret)) {
6478 struct target_statfs *target_stfs;
6479
6480 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6481 goto efault;
6482 __put_user(stfs.f_type, &target_stfs->f_type);
6483 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6484 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6485 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6486 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6487 __put_user(stfs.f_files, &target_stfs->f_files);
6488 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6489 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6490 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6491 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6492 unlock_user_struct(target_stfs, arg2, 1);
6493 }
6494 break;
6495 case TARGET_NR_fstatfs:
6496 ret = get_errno(fstatfs(arg1, &stfs));
6497 goto convert_statfs;
6498 #ifdef TARGET_NR_statfs64
6499 case TARGET_NR_statfs64:
6500 if (!(p = lock_user_string(arg1)))
6501 goto efault;
6502 ret = get_errno(statfs(path(p), &stfs));
6503 unlock_user(p, arg1, 0);
6504 convert_statfs64:
6505 if (!is_error(ret)) {
6506 struct target_statfs64 *target_stfs;
6507
6508 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6509 goto efault;
6510 __put_user(stfs.f_type, &target_stfs->f_type);
6511 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6512 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6513 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6514 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6515 __put_user(stfs.f_files, &target_stfs->f_files);
6516 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6517 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6518 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6519 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6520 unlock_user_struct(target_stfs, arg3, 1);
6521 }
6522 break;
6523 case TARGET_NR_fstatfs64:
6524 ret = get_errno(fstatfs(arg1, &stfs));
6525 goto convert_statfs64;
6526 #endif
6527 #ifdef TARGET_NR_ioperm
6528 case TARGET_NR_ioperm:
6529 goto unimplemented;
6530 #endif
6531 #ifdef TARGET_NR_socketcall
6532 case TARGET_NR_socketcall:
6533 ret = do_socketcall(arg1, arg2);
6534 break;
6535 #endif
6536 #ifdef TARGET_NR_accept
6537 case TARGET_NR_accept:
6538 ret = do_accept(arg1, arg2, arg3);
6539 break;
6540 #endif
6541 #ifdef TARGET_NR_bind
6542 case TARGET_NR_bind:
6543 ret = do_bind(arg1, arg2, arg3);
6544 break;
6545 #endif
6546 #ifdef TARGET_NR_connect
6547 case TARGET_NR_connect:
6548 ret = do_connect(arg1, arg2, arg3);
6549 break;
6550 #endif
6551 #ifdef TARGET_NR_getpeername
6552 case TARGET_NR_getpeername:
6553 ret = do_getpeername(arg1, arg2, arg3);
6554 break;
6555 #endif
6556 #ifdef TARGET_NR_getsockname
6557 case TARGET_NR_getsockname:
6558 ret = do_getsockname(arg1, arg2, arg3);
6559 break;
6560 #endif
6561 #ifdef TARGET_NR_getsockopt
6562 case TARGET_NR_getsockopt:
6563 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6564 break;
6565 #endif
6566 #ifdef TARGET_NR_listen
6567 case TARGET_NR_listen:
6568 ret = get_errno(listen(arg1, arg2));
6569 break;
6570 #endif
6571 #ifdef TARGET_NR_recv
6572 case TARGET_NR_recv:
6573 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6574 break;
6575 #endif
6576 #ifdef TARGET_NR_recvfrom
6577 case TARGET_NR_recvfrom:
6578 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6579 break;
6580 #endif
6581 #ifdef TARGET_NR_recvmsg
6582 case TARGET_NR_recvmsg:
6583 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6584 break;
6585 #endif
6586 #ifdef TARGET_NR_send
6587 case TARGET_NR_send:
6588 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6589 break;
6590 #endif
6591 #ifdef TARGET_NR_sendmsg
6592 case TARGET_NR_sendmsg:
6593 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6594 break;
6595 #endif
6596 #ifdef TARGET_NR_sendto
6597 case TARGET_NR_sendto:
6598 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6599 break;
6600 #endif
6601 #ifdef TARGET_NR_shutdown
6602 case TARGET_NR_shutdown:
6603 ret = get_errno(shutdown(arg1, arg2));
6604 break;
6605 #endif
6606 #ifdef TARGET_NR_socket
6607 case TARGET_NR_socket:
6608 ret = do_socket(arg1, arg2, arg3);
6609 break;
6610 #endif
6611 #ifdef TARGET_NR_socketpair
6612 case TARGET_NR_socketpair:
6613 ret = do_socketpair(arg1, arg2, arg3, arg4);
6614 break;
6615 #endif
6616 #ifdef TARGET_NR_setsockopt
6617 case TARGET_NR_setsockopt:
6618 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6619 break;
6620 #endif
6621
6622 case TARGET_NR_syslog:
6623 if (!(p = lock_user_string(arg2)))
6624 goto efault;
6625 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6626 unlock_user(p, arg2, 0);
6627 break;
6628
6629 case TARGET_NR_setitimer:
6630 {
6631 struct itimerval value, ovalue, *pvalue;
6632
6633 if (arg2) {
6634 pvalue = &value;
6635 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6636 || copy_from_user_timeval(&pvalue->it_value,
6637 arg2 + sizeof(struct target_timeval)))
6638 goto efault;
6639 } else {
6640 pvalue = NULL;
6641 }
6642 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6643 if (!is_error(ret) && arg3) {
6644 if (copy_to_user_timeval(arg3,
6645 &ovalue.it_interval)
6646 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6647 &ovalue.it_value))
6648 goto efault;
6649 }
6650 }
6651 break;
6652 case TARGET_NR_getitimer:
6653 {
6654 struct itimerval value;
6655
6656 ret = get_errno(getitimer(arg1, &value));
6657 if (!is_error(ret) && arg2) {
6658 if (copy_to_user_timeval(arg2,
6659 &value.it_interval)
6660 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6661 &value.it_value))
6662 goto efault;
6663 }
6664 }
6665 break;
6666 case TARGET_NR_stat:
6667 if (!(p = lock_user_string(arg1)))
6668 goto efault;
6669 ret = get_errno(stat(path(p), &st));
6670 unlock_user(p, arg1, 0);
6671 goto do_stat;
6672 case TARGET_NR_lstat:
6673 if (!(p = lock_user_string(arg1)))
6674 goto efault;
6675 ret = get_errno(lstat(path(p), &st));
6676 unlock_user(p, arg1, 0);
6677 goto do_stat;
6678 case TARGET_NR_fstat:
6679 {
6680 ret = get_errno(fstat(arg1, &st));
6681 do_stat:
6682 if (!is_error(ret)) {
6683 struct target_stat *target_st;
6684
6685 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6686 goto efault;
6687 memset(target_st, 0, sizeof(*target_st));
6688 __put_user(st.st_dev, &target_st->st_dev);
6689 __put_user(st.st_ino, &target_st->st_ino);
6690 __put_user(st.st_mode, &target_st->st_mode);
6691 __put_user(st.st_uid, &target_st->st_uid);
6692 __put_user(st.st_gid, &target_st->st_gid);
6693 __put_user(st.st_nlink, &target_st->st_nlink);
6694 __put_user(st.st_rdev, &target_st->st_rdev);
6695 __put_user(st.st_size, &target_st->st_size);
6696 __put_user(st.st_blksize, &target_st->st_blksize);
6697 __put_user(st.st_blocks, &target_st->st_blocks);
6698 __put_user(st.st_atime, &target_st->target_st_atime);
6699 __put_user(st.st_mtime, &target_st->target_st_mtime);
6700 __put_user(st.st_ctime, &target_st->target_st_ctime);
6701 unlock_user_struct(target_st, arg2, 1);
6702 }
6703 }
6704 break;
6705 #ifdef TARGET_NR_olduname
6706 case TARGET_NR_olduname:
6707 goto unimplemented;
6708 #endif
6709 #ifdef TARGET_NR_iopl
6710 case TARGET_NR_iopl:
6711 goto unimplemented;
6712 #endif
6713 case TARGET_NR_vhangup:
6714 ret = get_errno(vhangup());
6715 break;
6716 #ifdef TARGET_NR_idle
6717 case TARGET_NR_idle:
6718 goto unimplemented;
6719 #endif
6720 #ifdef TARGET_NR_syscall
6721 case TARGET_NR_syscall:
6722 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6723 arg6, arg7, arg8, 0);
6724 break;
6725 #endif
6726 case TARGET_NR_wait4:
6727 {
6728 int status;
6729 abi_long status_ptr = arg2;
6730 struct rusage rusage, *rusage_ptr;
6731 abi_ulong target_rusage = arg4;
6732 if (target_rusage)
6733 rusage_ptr = &rusage;
6734 else
6735 rusage_ptr = NULL;
6736 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6737 if (!is_error(ret)) {
6738 if (status_ptr && ret) {
6739 status = host_to_target_waitstatus(status);
6740 if (put_user_s32(status, status_ptr))
6741 goto efault;
6742 }
6743 if (target_rusage)
6744 host_to_target_rusage(target_rusage, &rusage);
6745 }
6746 }
6747 break;
6748 #ifdef TARGET_NR_swapoff
6749 case TARGET_NR_swapoff:
6750 if (!(p = lock_user_string(arg1)))
6751 goto efault;
6752 ret = get_errno(swapoff(p));
6753 unlock_user(p, arg1, 0);
6754 break;
6755 #endif
6756 case TARGET_NR_sysinfo:
6757 {
6758 struct target_sysinfo *target_value;
6759 struct sysinfo value;
6760 ret = get_errno(sysinfo(&value));
6761 if (!is_error(ret) && arg1)
6762 {
6763 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6764 goto efault;
6765 __put_user(value.uptime, &target_value->uptime);
6766 __put_user(value.loads[0], &target_value->loads[0]);
6767 __put_user(value.loads[1], &target_value->loads[1]);
6768 __put_user(value.loads[2], &target_value->loads[2]);
6769 __put_user(value.totalram, &target_value->totalram);
6770 __put_user(value.freeram, &target_value->freeram);
6771 __put_user(value.sharedram, &target_value->sharedram);
6772 __put_user(value.bufferram, &target_value->bufferram);
6773 __put_user(value.totalswap, &target_value->totalswap);
6774 __put_user(value.freeswap, &target_value->freeswap);
6775 __put_user(value.procs, &target_value->procs);
6776 __put_user(value.totalhigh, &target_value->totalhigh);
6777 __put_user(value.freehigh, &target_value->freehigh);
6778 __put_user(value.mem_unit, &target_value->mem_unit);
6779 unlock_user_struct(target_value, arg1, 1);
6780 }
6781 }
6782 break;
6783 #ifdef TARGET_NR_ipc
6784 case TARGET_NR_ipc:
6785 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6786 break;
6787 #endif
6788 #ifdef TARGET_NR_semget
6789 case TARGET_NR_semget:
6790 ret = get_errno(semget(arg1, arg2, arg3));
6791 break;
6792 #endif
6793 #ifdef TARGET_NR_semop
6794 case TARGET_NR_semop:
6795 ret = get_errno(do_semop(arg1, arg2, arg3));
6796 break;
6797 #endif
6798 #ifdef TARGET_NR_semctl
6799 case TARGET_NR_semctl:
6800 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6801 break;
6802 #endif
6803 #ifdef TARGET_NR_msgctl
6804 case TARGET_NR_msgctl:
6805 ret = do_msgctl(arg1, arg2, arg3);
6806 break;
6807 #endif
6808 #ifdef TARGET_NR_msgget
6809 case TARGET_NR_msgget:
6810 ret = get_errno(msgget(arg1, arg2));
6811 break;
6812 #endif
6813 #ifdef TARGET_NR_msgrcv
6814 case TARGET_NR_msgrcv:
6815 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6816 break;
6817 #endif
6818 #ifdef TARGET_NR_msgsnd
6819 case TARGET_NR_msgsnd:
6820 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6821 break;
6822 #endif
6823 #ifdef TARGET_NR_shmget
6824 case TARGET_NR_shmget:
6825 ret = get_errno(shmget(arg1, arg2, arg3));
6826 break;
6827 #endif
6828 #ifdef TARGET_NR_shmctl
6829 case TARGET_NR_shmctl:
6830 ret = do_shmctl(arg1, arg2, arg3);
6831 break;
6832 #endif
6833 #ifdef TARGET_NR_shmat
6834 case TARGET_NR_shmat:
6835 ret = do_shmat(arg1, arg2, arg3);
6836 break;
6837 #endif
6838 #ifdef TARGET_NR_shmdt
6839 case TARGET_NR_shmdt:
6840 ret = do_shmdt(arg1);
6841 break;
6842 #endif
6843 case TARGET_NR_fsync:
6844 ret = get_errno(fsync(arg1));
6845 break;
6846 case TARGET_NR_clone:
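/* The register order of the clone arguments differs between targets, so reorder them before handing off to do_fork(). */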
6847 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6848 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6849 #elif defined(TARGET_CRIS)
6850 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6851 #elif defined(TARGET_S390X)
6852 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6853 #else
6854 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6855 #endif
6856 break;
6857 #ifdef __NR_exit_group
6858 /* new thread calls */
6859 case TARGET_NR_exit_group:
6860 #ifdef TARGET_GPROF
6861 _mcleanup();
6862 #endif
6863 gdb_exit(cpu_env, arg1);
6864 ret = get_errno(exit_group(arg1));
6865 break;
6866 #endif
6867 case TARGET_NR_setdomainname:
6868 if (!(p = lock_user_string(arg1)))
6869 goto efault;
6870 ret = get_errno(setdomainname(p, arg2));
6871 unlock_user(p, arg1, 0);
6872 break;
6873 case TARGET_NR_uname:
6874 /* no need to transcode because we use the linux syscall */
6875 {
6876 struct new_utsname * buf;
6877
6878 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6879 goto efault;
6880 ret = get_errno(sys_uname(buf));
6881 if (!is_error(ret)) {
6882 /* Overwrite the native machine name with whatever is being
6883 emulated. */
6884 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6885 /* Allow the user to override the reported release. */
6886 if (qemu_uname_release && *qemu_uname_release)
6887 strcpy (buf->release, qemu_uname_release);
6888 }
6889 unlock_user_struct(buf, arg1, 1);
6890 }
6891 break;
6892 #ifdef TARGET_I386
6893 case TARGET_NR_modify_ldt:
6894 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6895 break;
6896 #if !defined(TARGET_X86_64)
6897 case TARGET_NR_vm86old:
6898 goto unimplemented;
6899 case TARGET_NR_vm86:
6900 ret = do_vm86(cpu_env, arg1, arg2);
6901 break;
6902 #endif
6903 #endif
6904 case TARGET_NR_adjtimex:
6905 goto unimplemented;
6906 #ifdef TARGET_NR_create_module
6907 case TARGET_NR_create_module:
6908 #endif
6909 case TARGET_NR_init_module:
6910 case TARGET_NR_delete_module:
6911 #ifdef TARGET_NR_get_kernel_syms
6912 case TARGET_NR_get_kernel_syms:
6913 #endif
6914 goto unimplemented;
6915 case TARGET_NR_quotactl:
6916 goto unimplemented;
6917 case TARGET_NR_getpgid:
6918 ret = get_errno(getpgid(arg1));
6919 break;
6920 case TARGET_NR_fchdir:
6921 ret = get_errno(fchdir(arg1));
6922 break;
6923 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6924 case TARGET_NR_bdflush:
6925 goto unimplemented;
6926 #endif
6927 #ifdef TARGET_NR_sysfs
6928 case TARGET_NR_sysfs:
6929 goto unimplemented;
6930 #endif
6931 case TARGET_NR_personality:
6932 ret = get_errno(personality(arg1));
6933 break;
6934 #ifdef TARGET_NR_afs_syscall
6935 case TARGET_NR_afs_syscall:
6936 goto unimplemented;
6937 #endif
6938 #ifdef TARGET_NR__llseek /* Not on alpha */
6939 case TARGET_NR__llseek:
6940 {
6941 int64_t res;
6942 #if !defined(__NR_llseek)
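/* No host _llseek: combine the two 32-bit halves into a 64-bit offset and fall back to lseek. */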
6943 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6944 if (res == -1) {
6945 ret = get_errno(res);
6946 } else {
6947 ret = 0;
6948 }
6949 #else
6950 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6951 #endif
6952 if ((ret == 0) && put_user_s64(res, arg4)) {
6953 goto efault;
6954 }
6955 }
6956 break;
6957 #endif
6958 case TARGET_NR_getdents:
6959 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6960 {
6961 struct target_dirent *target_dirp;
6962 struct linux_dirent *dirp;
6963 abi_long count = arg3;
6964
6965 dirp = malloc(count);
6966 if (!dirp) {
6967 ret = -TARGET_ENOMEM;
6968 goto fail;
6969 }
6970
6971 ret = get_errno(sys_getdents(arg1, dirp, count));
6972 if (!is_error(ret)) {
6973 struct linux_dirent *de;
6974 struct target_dirent *tde;
6975 int len = ret;
6976 int reclen, treclen;
6977 int count1, tnamelen;
6978
6979 count1 = 0;
6980 de = dirp;
6981 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6982 goto efault;
6983 tde = target_dirp;
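/* Repack each host dirent into the target layout, shrinking d_reclen to account for the narrower d_ino/d_off fields and byte-swapping as we go. */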
6984 while (len > 0) {
6985 reclen = de->d_reclen;
6986 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6987 tde->d_reclen = tswap16(treclen);
6988 tde->d_ino = tswapal(de->d_ino);
6989 tde->d_off = tswapal(de->d_off);
6990 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6991 if (tnamelen > 256)
6992 tnamelen = 256;
6993 /* XXX: may not be correct */
6994 pstrcpy(tde->d_name, tnamelen, de->d_name);
6995 de = (struct linux_dirent *)((char *)de + reclen);
6996 len -= reclen;
6997 tde = (struct target_dirent *)((char *)tde + treclen);
6998 count1 += treclen;
6999 }
7000 ret = count1;
7001 unlock_user(target_dirp, arg2, ret);
7002 }
7003 free(dirp);
7004 }
7005 #else
7006 {
7007 struct linux_dirent *dirp;
7008 abi_long count = arg3;
7009
7010 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7011 goto efault;
7012 ret = get_errno(sys_getdents(arg1, dirp, count));
7013 if (!is_error(ret)) {
7014 struct linux_dirent *de;
7015 int len = ret;
7016 int reclen;
7017 de = dirp;
7018 while (len > 0) {
7019 reclen = de->d_reclen;
7020 if (reclen > len)
7021 break;
7022 de->d_reclen = tswap16(reclen);
7023 tswapls(&de->d_ino);
7024 tswapls(&de->d_off);
7025 de = (struct linux_dirent *)((char *)de + reclen);
7026 len -= reclen;
7027 }
7028 }
7029 unlock_user(dirp, arg2, ret);
7030 }
7031 #endif
7032 break;
7033 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7034 case TARGET_NR_getdents64:
7035 {
7036 struct linux_dirent64 *dirp;
7037 abi_long count = arg3;
7038 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7039 goto efault;
7040 ret = get_errno(sys_getdents64(arg1, dirp, count));
7041 if (!is_error(ret)) {
7042 struct linux_dirent64 *de;
7043 int len = ret;
7044 int reclen;
7045 de = dirp;
7046 while (len > 0) {
7047 reclen = de->d_reclen;
7048 if (reclen > len)
7049 break;
7050 de->d_reclen = tswap16(reclen);
7051 tswap64s((uint64_t *)&de->d_ino);
7052 tswap64s((uint64_t *)&de->d_off);
7053 de = (struct linux_dirent64 *)((char *)de + reclen);
7054 len -= reclen;
7055 }
7056 }
7057 unlock_user(dirp, arg2, ret);
7058 }
7059 break;
7060 #endif /* TARGET_NR_getdents64 */
7061 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
7062 #ifdef TARGET_S390X
7063 case TARGET_NR_select:
7064 #else
7065 case TARGET_NR__newselect:
7066 #endif
7067 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7068 break;
7069 #endif
7070 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7071 # ifdef TARGET_NR_poll
7072 case TARGET_NR_poll:
7073 # endif
7074 # ifdef TARGET_NR_ppoll
7075 case TARGET_NR_ppoll:
7076 # endif
7077 {
7078 struct target_pollfd *target_pfd;
7079 unsigned int nfds = arg2;
7080 int timeout = arg3;
7081 struct pollfd *pfd;
7082 unsigned int i;
7083
7084 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7085 if (!target_pfd)
7086 goto efault;
7087
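/* Convert the target pollfd array into host pollfd structures, byte-swapping fd and events. */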
7088 pfd = alloca(sizeof(struct pollfd) * nfds);
7089 for(i = 0; i < nfds; i++) {
7090 pfd[i].fd = tswap32(target_pfd[i].fd);
7091 pfd[i].events = tswap16(target_pfd[i].events);
7092 }
7093
7094 # ifdef TARGET_NR_ppoll
7095 if (num == TARGET_NR_ppoll) {
7096 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7097 target_sigset_t *target_set;
7098 sigset_t _set, *set = &_set;
7099
7100 if (arg3) {
7101 if (target_to_host_timespec(timeout_ts, arg3)) {
7102 unlock_user(target_pfd, arg1, 0);
7103 goto efault;
7104 }
7105 } else {
7106 timeout_ts = NULL;
7107 }
7108
7109 if (arg4) {
7110 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7111 if (!target_set) {
7112 unlock_user(target_pfd, arg1, 0);
7113 goto efault;
7114 }
7115 target_to_host_sigset(set, target_set);
7116 } else {
7117 set = NULL;
7118 }
7119
7120 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7121
7122 if (!is_error(ret) && arg3) {
7123 host_to_target_timespec(arg3, timeout_ts);
7124 }
7125 if (arg4) {
7126 unlock_user(target_set, arg4, 0);
7127 }
7128 } else
7129 # endif
7130 ret = get_errno(poll(pfd, nfds, timeout));
7131
7132 if (!is_error(ret)) {
7133 for(i = 0; i < nfds; i++) {
7134 target_pfd[i].revents = tswap16(pfd[i].revents);
7135 }
7136 }
7137 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7138 }
7139 break;
7140 #endif
7141 case TARGET_NR_flock:
7142 /* NOTE: the flock constant seems to be the same for every
7143 Linux platform */
7144 ret = get_errno(flock(arg1, arg2));
7145 break;
7146 case TARGET_NR_readv:
7147 {
7148 int count = arg3;
7149 struct iovec *vec;
7150
7151 vec = alloca(count * sizeof(struct iovec));
7152 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
7153 goto efault;
7154 ret = get_errno(readv(arg1, vec, count));
7155 unlock_iovec(vec, arg2, count, 1);
7156 }
7157 break;
7158 case TARGET_NR_writev:
7159 {
7160 int count = arg3;
7161 struct iovec *vec;
7162
7163 vec = alloca(count * sizeof(struct iovec));
7164 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7165 goto efault;
7166 ret = get_errno(writev(arg1, vec, count));
7167 unlock_iovec(vec, arg2, count, 0);
7168 }
7169 break;
7170 case TARGET_NR_getsid:
7171 ret = get_errno(getsid(arg1));
7172 break;
7173 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7174 case TARGET_NR_fdatasync:
7175 ret = get_errno(fdatasync(arg1));
7176 break;
7177 #endif
7178 case TARGET_NR__sysctl:
7179 /* We don't implement this, but ENOTDIR is always a safe
7180 return value. */
7181 ret = -TARGET_ENOTDIR;
7182 break;
7183 case TARGET_NR_sched_getaffinity:
7184 {
7185 unsigned int mask_size;
7186 unsigned long *mask;
7187
7188 /*
7189 * sched_getaffinity needs multiples of ulong, so we need to take
7190 * care of mismatches between target ulong and host ulong sizes.
7191 */
7192 if (arg2 & (sizeof(abi_ulong) - 1)) {
7193 ret = -TARGET_EINVAL;
7194 break;
7195 }
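/* Round the buffer size up to a whole number of host ulongs. */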
7196 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7197
7198 mask = alloca(mask_size);
7199 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7200
7201 if (!is_error(ret)) {
7202 if (copy_to_user(arg3, mask, ret)) {
7203 goto efault;
7204 }
7205 }
7206 }
7207 break;
7208 case TARGET_NR_sched_setaffinity:
7209 {
7210 unsigned int mask_size;
7211 unsigned long *mask;
7212
7213 /*
7214 * sched_setaffinity needs multiples of ulong, so we need to take
7215 * care of mismatches between target ulong and host ulong sizes.
7216 */
7217 if (arg2 & (sizeof(abi_ulong) - 1)) {
7218 ret = -TARGET_EINVAL;
7219 break;
7220 }
7221 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7222
7223 mask = alloca(mask_size);
7224 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7225 goto efault;
7226 }
7227 memcpy(mask, p, arg2);
7228 unlock_user_struct(p, arg3, 0);
7229
7230 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7231 }
7232 break;
7233 case TARGET_NR_sched_setparam:
7234 {
7235 struct sched_param *target_schp;
7236 struct sched_param schp;
7237
7238 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7239 goto efault;
7240 schp.sched_priority = tswap32(target_schp->sched_priority);
7241 unlock_user_struct(target_schp, arg2, 0);
7242 ret = get_errno(sched_setparam(arg1, &schp));
7243 }
7244 break;
7245 case TARGET_NR_sched_getparam:
7246 {
7247 struct sched_param *target_schp;
7248 struct sched_param schp;
7249 ret = get_errno(sched_getparam(arg1, &schp));
7250 if (!is_error(ret)) {
7251 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7252 goto efault;
7253 target_schp->sched_priority = tswap32(schp.sched_priority);
7254 unlock_user_struct(target_schp, arg2, 1);
7255 }
7256 }
7257 break;
7258 case TARGET_NR_sched_setscheduler:
7259 {
7260 struct sched_param *target_schp;
7261 struct sched_param schp;
7262 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7263 goto efault;
7264 schp.sched_priority = tswap32(target_schp->sched_priority);
7265 unlock_user_struct(target_schp, arg3, 0);
7266 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7267 }
7268 break;
7269 case TARGET_NR_sched_getscheduler:
7270 ret = get_errno(sched_getscheduler(arg1));
7271 break;
7272 case TARGET_NR_sched_yield:
7273 ret = get_errno(sched_yield());
7274 break;
7275 case TARGET_NR_sched_get_priority_max:
7276 ret = get_errno(sched_get_priority_max(arg1));
7277 break;
7278 case TARGET_NR_sched_get_priority_min:
7279 ret = get_errno(sched_get_priority_min(arg1));
7280 break;
7281 case TARGET_NR_sched_rr_get_interval:
7282 {
7283 struct timespec ts;
7284 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7285 if (!is_error(ret)) {
7286 host_to_target_timespec(arg2, &ts);
7287 }
7288 }
7289 break;
7290 case TARGET_NR_nanosleep:
7291 {
7292 struct timespec req, rem;
7293 target_to_host_timespec(&req, arg1);
7294 ret = get_errno(nanosleep(&req, &rem));
7295 if (is_error(ret) && arg2) {
7296 host_to_target_timespec(arg2, &rem);
7297 }
7298 }
7299 break;
7300 #ifdef TARGET_NR_query_module
7301 case TARGET_NR_query_module:
7302 goto unimplemented;
7303 #endif
7304 #ifdef TARGET_NR_nfsservctl
7305 case TARGET_NR_nfsservctl:
7306 goto unimplemented;
7307 #endif
7308 case TARGET_NR_prctl:
7309 switch (arg1) {
7310 case PR_GET_PDEATHSIG:
7311 {
7312 int deathsig;
7313 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7314 if (!is_error(ret) && arg2
7315 && put_user_ual(deathsig, arg2)) {
7316 goto efault;
7317 }
7318 break;
7319 }
7320 #ifdef PR_GET_NAME
7321 case PR_GET_NAME:
7322 {
7323 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7324 if (!name) {
7325 goto efault;
7326 }
7327 ret = get_errno(prctl(arg1, (unsigned long)name,
7328 arg3, arg4, arg5));
7329 unlock_user(name, arg2, 16);
7330 break;
7331 }
7332 case PR_SET_NAME:
7333 {
7334 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7335 if (!name) {
7336 goto efault;
7337 }
7338 ret = get_errno(prctl(arg1, (unsigned long)name,
7339 arg3, arg4, arg5));
7340 unlock_user(name, arg2, 0);
7341 break;
7342 }
7343 #endif
7344 default:
7345 /* Most prctl options have no pointer arguments */
7346 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7347 break;
7348 }
7349 break;
7350 #ifdef TARGET_NR_arch_prctl
7351 case TARGET_NR_arch_prctl:
7352 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7353 ret = do_arch_prctl(cpu_env, arg1, arg2);
7354 break;
7355 #else
7356 goto unimplemented;
7357 #endif
7358 #endif
7359 #ifdef TARGET_NR_pread
7360 case TARGET_NR_pread:
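/* Targets that align 64-bit syscall arguments in register pairs insert a pad register here, so the offset begins in arg5 rather than arg4. */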
7361 if (regpairs_aligned(cpu_env))
7362 arg4 = arg5;
7363 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7364 goto efault;
7365 ret = get_errno(pread(arg1, p, arg3, arg4));
7366 unlock_user(p, arg2, ret);
7367 break;
7368 case TARGET_NR_pwrite:
7369 if (regpairs_aligned(cpu_env))
7370 arg4 = arg5;
7371 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7372 goto efault;
7373 ret = get_errno(pwrite(arg1, p, arg3, arg4));
7374 unlock_user(p, arg2, 0);
7375 break;
7376 #endif
7377 #ifdef TARGET_NR_pread64
7378 case TARGET_NR_pread64:
7379 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7380 goto efault;
7381 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7382 unlock_user(p, arg2, ret);
7383 break;
7384 case TARGET_NR_pwrite64:
7385 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7386 goto efault;
7387 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7388 unlock_user(p, arg2, 0);
7389 break;
7390 #endif
7391 case TARGET_NR_getcwd:
7392 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7393 goto efault;
7394 ret = get_errno(sys_getcwd1(p, arg2));
7395 unlock_user(p, arg1, ret);
7396 break;
7397 case TARGET_NR_capget:
7398 goto unimplemented;
7399 case TARGET_NR_capset:
7400 goto unimplemented;
7401 case TARGET_NR_sigaltstack:
7402 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7403 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7404 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7405 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7406 break;
7407 #else
7408 goto unimplemented;
7409 #endif
7410 case TARGET_NR_sendfile:
7411 goto unimplemented;
7412 #ifdef TARGET_NR_getpmsg
7413 case TARGET_NR_getpmsg:
7414 goto unimplemented;
7415 #endif
7416 #ifdef TARGET_NR_putpmsg
7417 case TARGET_NR_putpmsg:
7418 goto unimplemented;
7419 #endif
7420 #ifdef TARGET_NR_vfork
7421 case TARGET_NR_vfork:
7422 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7423 0, 0, 0, 0));
7424 break;
7425 #endif
7426 #ifdef TARGET_NR_ugetrlimit
7427 case TARGET_NR_ugetrlimit:
7428 {
7429 struct rlimit rlim;
7430 int resource = target_to_host_resource(arg1);
7431 ret = get_errno(getrlimit(resource, &rlim));
7432 if (!is_error(ret)) {
7433 struct target_rlimit *target_rlim;
7434 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7435 goto efault;
7436 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7437 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7438 unlock_user_struct(target_rlim, arg2, 1);
7439 }
7440 break;
7441 }
7442 #endif
7443 #ifdef TARGET_NR_truncate64
7444 case TARGET_NR_truncate64:
7445 if (!(p = lock_user_string(arg1)))
7446 goto efault;
7447 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7448 unlock_user(p, arg1, 0);
7449 break;
7450 #endif
7451 #ifdef TARGET_NR_ftruncate64
7452 case TARGET_NR_ftruncate64:
7453 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7454 break;
7455 #endif
7456 #ifdef TARGET_NR_stat64
7457 case TARGET_NR_stat64:
7458 if (!(p = lock_user_string(arg1)))
7459 goto efault;
7460 ret = get_errno(stat(path(p), &st));
7461 unlock_user(p, arg1, 0);
7462 if (!is_error(ret))
7463 ret = host_to_target_stat64(cpu_env, arg2, &st);
7464 break;
7465 #endif
7466 #ifdef TARGET_NR_lstat64
7467 case TARGET_NR_lstat64:
7468 if (!(p = lock_user_string(arg1)))
7469 goto efault;
7470 ret = get_errno(lstat(path(p), &st));
7471 unlock_user(p, arg1, 0);
7472 if (!is_error(ret))
7473 ret = host_to_target_stat64(cpu_env, arg2, &st);
7474 break;
7475 #endif
7476 #ifdef TARGET_NR_fstat64
7477 case TARGET_NR_fstat64:
7478 ret = get_errno(fstat(arg1, &st));
7479 if (!is_error(ret))
7480 ret = host_to_target_stat64(cpu_env, arg2, &st);
7481 break;
7482 #endif
7483 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7484 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7485 #ifdef TARGET_NR_fstatat64
7486 case TARGET_NR_fstatat64:
7487 #endif
7488 #ifdef TARGET_NR_newfstatat
7489 case TARGET_NR_newfstatat:
7490 #endif
7491 if (!(p = lock_user_string(arg2)))
7492 goto efault;
7493 #ifdef __NR_fstatat64
7494 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7495 #else
7496 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7497 #endif
7498 if (!is_error(ret))
7499 ret = host_to_target_stat64(cpu_env, arg3, &st);
7500 break;
7501 #endif
7502 case TARGET_NR_lchown:
7503 if (!(p = lock_user_string(arg1)))
7504 goto efault;
7505 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7506 unlock_user(p, arg1, 0);
7507 break;
7508 #ifdef TARGET_NR_getuid
7509 case TARGET_NR_getuid:
7510 ret = get_errno(high2lowuid(getuid()));
7511 break;
7512 #endif
7513 #ifdef TARGET_NR_getgid
7514 case TARGET_NR_getgid:
7515 ret = get_errno(high2lowgid(getgid()));
7516 break;
7517 #endif
7518 #ifdef TARGET_NR_geteuid
7519 case TARGET_NR_geteuid:
7520 ret = get_errno(high2lowuid(geteuid()));
7521 break;
7522 #endif
7523 #ifdef TARGET_NR_getegid
7524 case TARGET_NR_getegid:
7525 ret = get_errno(high2lowgid(getegid()));
7526 break;
7527 #endif
7528 case TARGET_NR_setreuid:
7529 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7530 break;
7531 case TARGET_NR_setregid:
7532 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7533 break;
7534 case TARGET_NR_getgroups:
7535 {
7536 int gidsetsize = arg1;
7537 target_id *target_grouplist;
7538 gid_t *grouplist;
7539 int i;
7540
7541 grouplist = alloca(gidsetsize * sizeof(gid_t));
7542 ret = get_errno(getgroups(gidsetsize, grouplist));
7543 if (gidsetsize == 0)
7544 break;
7545 if (!is_error(ret)) {
7546 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7547 if (!target_grouplist)
7548 goto efault;
7549 for(i = 0;i < ret; i++)
7550 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7551 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7552 }
7553 }
7554 break;
7555 case TARGET_NR_setgroups:
7556 {
7557 int gidsetsize = arg1;
7558 target_id *target_grouplist;
7559 gid_t *grouplist;
7560 int i;
7561
7562 grouplist = alloca(gidsetsize * sizeof(gid_t));
7563 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7564 if (!target_grouplist) {
7565 ret = -TARGET_EFAULT;
7566 goto fail;
7567 }
7568 for(i = 0;i < gidsetsize; i++)
7569 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7570 unlock_user(target_grouplist, arg2, 0);
7571 ret = get_errno(setgroups(gidsetsize, grouplist));
7572 }
7573 break;
7574 case TARGET_NR_fchown:
7575 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7576 break;
7577 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7578 case TARGET_NR_fchownat:
7579 if (!(p = lock_user_string(arg2)))
7580 goto efault;
7581 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7582 unlock_user(p, arg2, 0);
7583 break;
7584 #endif
7585 #ifdef TARGET_NR_setresuid
7586 case TARGET_NR_setresuid:
7587 ret = get_errno(setresuid(low2highuid(arg1),
7588 low2highuid(arg2),
7589 low2highuid(arg3)));
7590 break;
7591 #endif
7592 #ifdef TARGET_NR_getresuid
7593 case TARGET_NR_getresuid:
7594 {
7595 uid_t ruid, euid, suid;
7596 ret = get_errno(getresuid(&ruid, &euid, &suid));
7597 if (!is_error(ret)) {
7598 if (put_user_u16(high2lowuid(ruid), arg1)
7599 || put_user_u16(high2lowuid(euid), arg2)
7600 || put_user_u16(high2lowuid(suid), arg3))
7601 goto efault;
7602 }
7603 }
7604 break;
7605 #endif
7606 #ifdef TARGET_NR_getresgid
7607 case TARGET_NR_setresgid:
7608 ret = get_errno(setresgid(low2highgid(arg1),
7609 low2highgid(arg2),
7610 low2highgid(arg3)));
7611 break;
7612 #endif
7613 #ifdef TARGET_NR_getresgid
7614 case TARGET_NR_getresgid:
7615 {
7616 gid_t rgid, egid, sgid;
7617 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7618 if (!is_error(ret)) {
7619 if (put_user_u16(high2lowgid(rgid), arg1)
7620 || put_user_u16(high2lowgid(egid), arg2)
7621 || put_user_u16(high2lowgid(sgid), arg3))
7622 goto efault;
7623 }
7624 }
7625 break;
7626 #endif
7627 case TARGET_NR_chown:
7628 if (!(p = lock_user_string(arg1)))
7629 goto efault;
7630 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7631 unlock_user(p, arg1, 0);
7632 break;
7633 case TARGET_NR_setuid:
7634 ret = get_errno(setuid(low2highuid(arg1)));
7635 break;
7636 case TARGET_NR_setgid:
7637 ret = get_errno(setgid(low2highgid(arg1)));
7638 break;
7639 case TARGET_NR_setfsuid:
7640 ret = get_errno(setfsuid(arg1));
7641 break;
7642 case TARGET_NR_setfsgid:
7643 ret = get_errno(setfsgid(arg1));
7644 break;
7645
7646 #ifdef TARGET_NR_lchown32
7647 case TARGET_NR_lchown32:
7648 if (!(p = lock_user_string(arg1)))
7649 goto efault;
7650 ret = get_errno(lchown(p, arg2, arg3));
7651 unlock_user(p, arg1, 0);
7652 break;
7653 #endif
7654 #ifdef TARGET_NR_getuid32
7655 case TARGET_NR_getuid32:
7656 ret = get_errno(getuid());
7657 break;
7658 #endif
7659
7660 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7661 /* Alpha specific */
7662 case TARGET_NR_getxuid:
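/* getxuid returns the real uid in v0 and stashes the effective uid in a4. */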
7663 {
7664 uid_t euid;
7665 euid=geteuid();
7666 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7667 }
7668 ret = get_errno(getuid());
7669 break;
7670 #endif
7671 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7672 /* Alpha specific */
7673 case TARGET_NR_getxgid:
7674 {
7675 uid_t egid;
7676 egid=getegid();
7677 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7678 }
7679 ret = get_errno(getgid());
7680 break;
7681 #endif
7682 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7683 /* Alpha specific */
7684 case TARGET_NR_osf_getsysinfo:
7685 ret = -TARGET_EOPNOTSUPP;
7686 switch (arg1) {
7687 case TARGET_GSI_IEEE_FP_CONTROL:
7688 {
7689 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7690
7691 /* Copied from linux ieee_fpcr_to_swcr. */
7692 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7693 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7694 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7695 | SWCR_TRAP_ENABLE_DZE
7696 | SWCR_TRAP_ENABLE_OVF);
7697 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7698 | SWCR_TRAP_ENABLE_INE);
7699 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7700 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7701
7702 if (put_user_u64 (swcr, arg2))
7703 goto efault;
7704 ret = 0;
7705 }
7706 break;
7707
7708 /* case GSI_IEEE_STATE_AT_SIGNAL:
7709 -- Not implemented in linux kernel.
7710 case GSI_UACPROC:
7711 -- Retrieves current unaligned access state; not much used.
7712 case GSI_PROC_TYPE:
7713 -- Retrieves implver information; surely not used.
7714 case GSI_GET_HWRPB:
7715 -- Grabs a copy of the HWRPB; surely not used.
7716 */
7717 }
7718 break;
7719 #endif
7720 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7721 /* Alpha specific */
7722 case TARGET_NR_osf_setsysinfo:
7723 ret = -TARGET_EOPNOTSUPP;
7724 switch (arg1) {
7725 case TARGET_SSI_IEEE_FP_CONTROL:
7726 {
7727 uint64_t swcr, fpcr, orig_fpcr;
7728
7729 if (get_user_u64 (swcr, arg2)) {
7730 goto efault;
7731 }
7732 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7733 fpcr = orig_fpcr & FPCR_DYN_MASK;
7734
7735 /* Copied from linux ieee_swcr_to_fpcr. */
7736 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7737 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7738 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7739 | SWCR_TRAP_ENABLE_DZE
7740 | SWCR_TRAP_ENABLE_OVF)) << 48;
7741 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7742 | SWCR_TRAP_ENABLE_INE)) << 57;
7743 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7744 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7745
7746 cpu_alpha_store_fpcr(cpu_env, fpcr);
7747 ret = 0;
7748 }
7749 break;
7750
7751 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7752 {
7753 uint64_t exc, fpcr, orig_fpcr;
7754 int si_code;
7755
7756 if (get_user_u64(exc, arg2)) {
7757 goto efault;
7758 }
7759
7760 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7761
7762 /* We only add to the exception status here. */
7763 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7764
7765 cpu_alpha_store_fpcr(cpu_env, fpcr);
7766 ret = 0;
7767
7768 /* Old exceptions are not signaled. */
7769 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7770
7771 /* If any exceptions were set by this call and are
7772 unmasked, send a signal. */
7773 si_code = 0;
7774 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7775 si_code = TARGET_FPE_FLTRES;
7776 }
7777 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7778 si_code = TARGET_FPE_FLTUND;
7779 }
7780 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7781 si_code = TARGET_FPE_FLTOVF;
7782 }
7783 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7784 si_code = TARGET_FPE_FLTDIV;
7785 }
7786 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7787 si_code = TARGET_FPE_FLTINV;
7788 }
7789 if (si_code != 0) {
7790 target_siginfo_t info;
7791 info.si_signo = SIGFPE;
7792 info.si_errno = 0;
7793 info.si_code = si_code;
7794 info._sifields._sigfault._addr
7795 = ((CPUArchState *)cpu_env)->pc;
7796 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7797 }
7798 }
7799 break;
7800
7801 /* case SSI_NVPAIRS:
7802 -- Used with SSIN_UACPROC to enable unaligned accesses.
7803 case SSI_IEEE_STATE_AT_SIGNAL:
7804 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7805 -- Not implemented in linux kernel
7806 */
7807 }
7808 break;
7809 #endif
7810 #ifdef TARGET_NR_osf_sigprocmask
7811 /* Alpha specific. */
7812 case TARGET_NR_osf_sigprocmask:
7813 {
7814 abi_ulong mask;
7815 int how;
7816 sigset_t set, oldset;
7817
7818 switch(arg1) {
7819 case TARGET_SIG_BLOCK:
7820 how = SIG_BLOCK;
7821 break;
7822 case TARGET_SIG_UNBLOCK:
7823 how = SIG_UNBLOCK;
7824 break;
7825 case TARGET_SIG_SETMASK:
7826 how = SIG_SETMASK;
7827 break;
7828 default:
7829 ret = -TARGET_EINVAL;
7830 goto fail;
7831 }
7832 mask = arg2;
7833 target_to_host_old_sigset(&set, &mask);
7834 sigprocmask(how, &set, &oldset);
7835 host_to_target_old_sigset(&mask, &oldset);
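/* The OSF/1 flavour of sigprocmask returns the previous mask as the
   syscall result rather than storing it through a pointer. */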
7836 ret = mask;
7837 }
7838 break;
7839 #endif
7840
7841 #ifdef TARGET_NR_getgid32
7842 case TARGET_NR_getgid32:
7843 ret = get_errno(getgid());
7844 break;
7845 #endif
7846 #ifdef TARGET_NR_geteuid32
7847 case TARGET_NR_geteuid32:
7848 ret = get_errno(geteuid());
7849 break;
7850 #endif
7851 #ifdef TARGET_NR_getegid32
7852 case TARGET_NR_getegid32:
7853 ret = get_errno(getegid());
7854 break;
7855 #endif
7856 #ifdef TARGET_NR_setreuid32
7857 case TARGET_NR_setreuid32:
7858 ret = get_errno(setreuid(arg1, arg2));
7859 break;
7860 #endif
7861 #ifdef TARGET_NR_setregid32
7862 case TARGET_NR_setregid32:
7863 ret = get_errno(setregid(arg1, arg2));
7864 break;
7865 #endif
7866 #ifdef TARGET_NR_getgroups32
7867 case TARGET_NR_getgroups32:
7868 {
7869 int gidsetsize = arg1;
7870 uint32_t *target_grouplist;
7871 gid_t *grouplist;
7872 int i;
7873
7874 grouplist = alloca(gidsetsize * sizeof(gid_t));
7875 ret = get_errno(getgroups(gidsetsize, grouplist));
7876 if (gidsetsize == 0)
7877 break;
7878 if (!is_error(ret)) {
7879 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7880 if (!target_grouplist) {
7881 ret = -TARGET_EFAULT;
7882 goto fail;
7883 }
7884 for(i = 0;i < ret; i++)
7885 target_grouplist[i] = tswap32(grouplist[i]);
7886 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7887 }
7888 }
7889 break;
7890 #endif
7891 #ifdef TARGET_NR_setgroups32
7892 case TARGET_NR_setgroups32:
7893 {
7894 int gidsetsize = arg1;
7895 uint32_t *target_grouplist;
7896 gid_t *grouplist;
7897 int i;
7898
7899 grouplist = alloca(gidsetsize * sizeof(gid_t));
7900 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7901 if (!target_grouplist) {
7902 ret = -TARGET_EFAULT;
7903 goto fail;
7904 }
7905 for(i = 0;i < gidsetsize; i++)
7906 grouplist[i] = tswap32(target_grouplist[i]);
7907 unlock_user(target_grouplist, arg2, 0);
7908 ret = get_errno(setgroups(gidsetsize, grouplist));
7909 }
7910 break;
7911 #endif
7912 #ifdef TARGET_NR_fchown32
7913 case TARGET_NR_fchown32:
7914 ret = get_errno(fchown(arg1, arg2, arg3));
7915 break;
7916 #endif
7917 #ifdef TARGET_NR_setresuid32
7918 case TARGET_NR_setresuid32:
7919 ret = get_errno(setresuid(arg1, arg2, arg3));
7920 break;
7921 #endif
7922 #ifdef TARGET_NR_getresuid32
7923 case TARGET_NR_getresuid32:
7924 {
7925 uid_t ruid, euid, suid;
7926 ret = get_errno(getresuid(&ruid, &euid, &suid));
7927 if (!is_error(ret)) {
7928 if (put_user_u32(ruid, arg1)
7929 || put_user_u32(euid, arg2)
7930 || put_user_u32(suid, arg3))
7931 goto efault;
7932 }
7933 }
7934 break;
7935 #endif
7936 #ifdef TARGET_NR_setresgid32
7937 case TARGET_NR_setresgid32:
7938 ret = get_errno(setresgid(arg1, arg2, arg3));
7939 break;
7940 #endif
7941 #ifdef TARGET_NR_getresgid32
7942 case TARGET_NR_getresgid32:
7943 {
7944 gid_t rgid, egid, sgid;
7945 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7946 if (!is_error(ret)) {
7947 if (put_user_u32(rgid, arg1)
7948 || put_user_u32(egid, arg2)
7949 || put_user_u32(sgid, arg3))
7950 goto efault;
7951 }
7952 }
7953 break;
7954 #endif
7955 #ifdef TARGET_NR_chown32
7956 case TARGET_NR_chown32:
7957 if (!(p = lock_user_string(arg1)))
7958 goto efault;
7959 ret = get_errno(chown(p, arg2, arg3));
7960 unlock_user(p, arg1, 0);
7961 break;
7962 #endif
7963 #ifdef TARGET_NR_setuid32
7964 case TARGET_NR_setuid32:
7965 ret = get_errno(setuid(arg1));
7966 break;
7967 #endif
7968 #ifdef TARGET_NR_setgid32
7969 case TARGET_NR_setgid32:
7970 ret = get_errno(setgid(arg1));
7971 break;
7972 #endif
7973 #ifdef TARGET_NR_setfsuid32
7974 case TARGET_NR_setfsuid32:
7975 ret = get_errno(setfsuid(arg1));
7976 break;
7977 #endif
7978 #ifdef TARGET_NR_setfsgid32
7979 case TARGET_NR_setfsgid32:
7980 ret = get_errno(setfsgid(arg1));
7981 break;
7982 #endif
7983
7984 case TARGET_NR_pivot_root:
7985 goto unimplemented;
7986 #ifdef TARGET_NR_mincore
7987 case TARGET_NR_mincore:
7988 {
7989 void *a;
7990 ret = -TARGET_EFAULT;
7991 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7992 goto efault;
/* arg3 is a vector of one status byte per page, not a string */
7993 if (!(p = lock_user(VERIFY_WRITE, arg3, (arg2 + TARGET_PAGE_SIZE - 1) / TARGET_PAGE_SIZE, 0)))
7994 goto mincore_fail;
7995 ret = get_errno(mincore(a, arg2, p));
7996 unlock_user(p, arg3, (arg2 + TARGET_PAGE_SIZE - 1) / TARGET_PAGE_SIZE);
7997 mincore_fail:
7998 unlock_user(a, arg1, 0);
7999 }
8000 break;
8001 #endif
8002 #ifdef TARGET_NR_arm_fadvise64_64
8003 case TARGET_NR_arm_fadvise64_64:
8004 {
8005 /*
8006 * arm_fadvise64_64 looks like fadvise64_64 but
8007 * with different argument order
8008 */
8009 abi_long temp;
8010 temp = arg3;
8011 arg3 = arg4;
8012 arg4 = temp;
8013 }
8014 #endif
8015 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8016 #ifdef TARGET_NR_fadvise64_64
8017 case TARGET_NR_fadvise64_64:
8018 #endif
8019 #ifdef TARGET_NR_fadvise64
8020 case TARGET_NR_fadvise64:
8021 #endif
8022 #ifdef TARGET_S390X
8023 switch (arg4) {
8024 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8025 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8026 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8027 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8028 default: break;
8029 }
8030 #endif
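/* posix_fadvise() returns the error number directly instead of
   setting errno, hence the explicit negation rather than get_errno(). */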
8031 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8032 break;
8033 #endif
8034 #ifdef TARGET_NR_madvise
8035 case TARGET_NR_madvise:
8036 /* A straight passthrough may not be safe because qemu sometimes
8037 turns private file-backed mappings into anonymous mappings.
8038 This will break MADV_DONTNEED.
8039 This is a hint, so ignoring and returning success is ok. */
8040 ret = get_errno(0);
8041 break;
8042 #endif
8043 #if TARGET_ABI_BITS == 32
8044 case TARGET_NR_fcntl64:
8045 {
8046 int cmd;
8047 struct flock64 fl;
8048 struct target_flock64 *target_fl;
8049 #ifdef TARGET_ARM
8050 struct target_eabi_flock64 *target_efl;
8051 #endif
8052
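/* The target and host flock64 layouts differ (the ARM EABI variant
   pads l_start to a 64-bit boundary), so the fields are converted
   one by one in both directions. */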
8053 cmd = target_to_host_fcntl_cmd(arg2);
8054 if (cmd == -TARGET_EINVAL) {
8055 ret = cmd;
8056 break;
8057 }
8058
8059 switch(arg2) {
8060 case TARGET_F_GETLK64:
8061 #ifdef TARGET_ARM
8062 if (((CPUARMState *)cpu_env)->eabi) {
8063 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8064 goto efault;
8065 fl.l_type = tswap16(target_efl->l_type);
8066 fl.l_whence = tswap16(target_efl->l_whence);
8067 fl.l_start = tswap64(target_efl->l_start);
8068 fl.l_len = tswap64(target_efl->l_len);
8069 fl.l_pid = tswap32(target_efl->l_pid);
8070 unlock_user_struct(target_efl, arg3, 0);
8071 } else
8072 #endif
8073 {
8074 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8075 goto efault;
8076 fl.l_type = tswap16(target_fl->l_type);
8077 fl.l_whence = tswap16(target_fl->l_whence);
8078 fl.l_start = tswap64(target_fl->l_start);
8079 fl.l_len = tswap64(target_fl->l_len);
8080 fl.l_pid = tswap32(target_fl->l_pid);
8081 unlock_user_struct(target_fl, arg3, 0);
8082 }
8083 ret = get_errno(fcntl(arg1, cmd, &fl));
8084 if (ret == 0) {
8085 #ifdef TARGET_ARM
8086 if (((CPUARMState *)cpu_env)->eabi) {
8087 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8088 goto efault;
8089 target_efl->l_type = tswap16(fl.l_type);
8090 target_efl->l_whence = tswap16(fl.l_whence);
8091 target_efl->l_start = tswap64(fl.l_start);
8092 target_efl->l_len = tswap64(fl.l_len);
8093 target_efl->l_pid = tswap32(fl.l_pid);
8094 unlock_user_struct(target_efl, arg3, 1);
8095 } else
8096 #endif
8097 {
8098 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8099 goto efault;
8100 target_fl->l_type = tswap16(fl.l_type);
8101 target_fl->l_whence = tswap16(fl.l_whence);
8102 target_fl->l_start = tswap64(fl.l_start);
8103 target_fl->l_len = tswap64(fl.l_len);
8104 target_fl->l_pid = tswap32(fl.l_pid);
8105 unlock_user_struct(target_fl, arg3, 1);
8106 }
8107 }
8108 break;
8109
8110 case TARGET_F_SETLK64:
8111 case TARGET_F_SETLKW64:
8112 #ifdef TARGET_ARM
8113 if (((CPUARMState *)cpu_env)->eabi) {
8114 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8115 goto efault;
8116 fl.l_type = tswap16(target_efl->l_type);
8117 fl.l_whence = tswap16(target_efl->l_whence);
8118 fl.l_start = tswap64(target_efl->l_start);
8119 fl.l_len = tswap64(target_efl->l_len);
8120 fl.l_pid = tswap32(target_efl->l_pid);
8121 unlock_user_struct(target_efl, arg3, 0);
8122 } else
8123 #endif
8124 {
8125 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8126 goto efault;
8127 fl.l_type = tswap16(target_fl->l_type);
8128 fl.l_whence = tswap16(target_fl->l_whence);
8129 fl.l_start = tswap64(target_fl->l_start);
8130 fl.l_len = tswap64(target_fl->l_len);
8131 fl.l_pid = tswap32(target_fl->l_pid);
8132 unlock_user_struct(target_fl, arg3, 0);
8133 }
8134 ret = get_errno(fcntl(arg1, cmd, &fl));
8135 break;
8136 default:
8137 ret = do_fcntl(arg1, arg2, arg3);
8138 break;
8139 }
8140 break;
8141 }
8142 #endif
8143 #ifdef TARGET_NR_cacheflush
8144 case TARGET_NR_cacheflush:
8145 /* self-modifying code is handled automatically, so nothing needed */
8146 ret = 0;
8147 break;
8148 #endif
8149 #ifdef TARGET_NR_security
8150 case TARGET_NR_security:
8151 goto unimplemented;
8152 #endif
8153 #ifdef TARGET_NR_getpagesize
8154 case TARGET_NR_getpagesize:
8155 ret = TARGET_PAGE_SIZE;
8156 break;
8157 #endif
8158 case TARGET_NR_gettid:
8159 ret = get_errno(gettid());
8160 break;
8161 #ifdef TARGET_NR_readahead
8162 case TARGET_NR_readahead:
8163 #if TARGET_ABI_BITS == 32
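/* On 32-bit ABIs that pass 64-bit values in aligned register pairs,
   a padding slot precedes the offset; regpairs_aligned() detects this
   and the real arguments start one register later. */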
8164 if (regpairs_aligned(cpu_env)) {
8165 arg2 = arg3;
8166 arg3 = arg4;
8167 arg4 = arg5;
8168 }
8169 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8170 #else
8171 ret = get_errno(readahead(arg1, arg2, arg3));
8172 #endif
8173 break;
8174 #endif
8175 #ifdef CONFIG_ATTR
8176 #ifdef TARGET_NR_setxattr
8177 case TARGET_NR_listxattr:
8178 case TARGET_NR_llistxattr:
8179 {
8180 void *p, *b = 0;
8181 if (arg2) {
8182 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8183 if (!b) {
8184 ret = -TARGET_EFAULT;
8185 break;
8186 }
8187 }
8188 p = lock_user_string(arg1);
8189 if (p) {
8190 if (num == TARGET_NR_listxattr) {
8191 ret = get_errno(listxattr(p, b, arg3));
8192 } else {
8193 ret = get_errno(llistxattr(p, b, arg3));
8194 }
8195 } else {
8196 ret = -TARGET_EFAULT;
8197 }
8198 unlock_user(p, arg1, 0);
8199 unlock_user(b, arg2, arg3);
8200 break;
8201 }
8202 case TARGET_NR_flistxattr:
8203 {
8204 void *b = 0;
8205 if (arg2) {
8206 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8207 if (!b) {
8208 ret = -TARGET_EFAULT;
8209 break;
8210 }
8211 }
8212 ret = get_errno(flistxattr(arg1, b, arg3));
8213 unlock_user(b, arg2, arg3);
8214 break;
8215 }
8216 case TARGET_NR_setxattr:
8217 case TARGET_NR_lsetxattr:
8218 {
8219 void *p, *n, *v = 0;
8220 if (arg3) {
8221 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8222 if (!v) {
8223 ret = -TARGET_EFAULT;
8224 break;
8225 }
8226 }
8227 p = lock_user_string(arg1);
8228 n = lock_user_string(arg2);
8229 if (p && n) {
8230 if (num == TARGET_NR_setxattr) {
8231 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8232 } else {
8233 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8234 }
8235 } else {
8236 ret = -TARGET_EFAULT;
8237 }
8238 unlock_user(p, arg1, 0);
8239 unlock_user(n, arg2, 0);
8240 unlock_user(v, arg3, 0);
8241 }
8242 break;
8243 case TARGET_NR_fsetxattr:
8244 {
8245 void *n, *v = 0;
8246 if (arg3) {
8247 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8248 if (!v) {
8249 ret = -TARGET_EFAULT;
8250 break;
8251 }
8252 }
8253 n = lock_user_string(arg2);
8254 if (n) {
8255 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8256 } else {
8257 ret = -TARGET_EFAULT;
8258 }
8259 unlock_user(n, arg2, 0);
8260 unlock_user(v, arg3, 0);
8261 }
8262 break;
8263 case TARGET_NR_getxattr:
8264 case TARGET_NR_lgetxattr:
8265 {
8266 void *p, *n, *v = 0;
8267 if (arg3) {
8268 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8269 if (!v) {
8270 ret = -TARGET_EFAULT;
8271 break;
8272 }
8273 }
8274 p = lock_user_string(arg1);
8275 n = lock_user_string(arg2);
8276 if (p && n) {
8277 if (num == TARGET_NR_getxattr) {
8278 ret = get_errno(getxattr(p, n, v, arg4));
8279 } else {
8280 ret = get_errno(lgetxattr(p, n, v, arg4));
8281 }
8282 } else {
8283 ret = -TARGET_EFAULT;
8284 }
8285 unlock_user(p, arg1, 0);
8286 unlock_user(n, arg2, 0);
8287 unlock_user(v, arg3, arg4);
8288 }
8289 break;
8290 case TARGET_NR_fgetxattr:
8291 {
8292 void *n, *v = 0;
8293 if (arg3) {
8294 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8295 if (!v) {
8296 ret = -TARGET_EFAULT;
8297 break;
8298 }
8299 }
8300 n = lock_user_string(arg2);
8301 if (n) {
8302 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8303 } else {
8304 ret = -TARGET_EFAULT;
8305 }
8306 unlock_user(n, arg2, 0);
8307 unlock_user(v, arg3, arg4);
8308 }
8309 break;
8310 case TARGET_NR_removexattr:
8311 case TARGET_NR_lremovexattr:
8312 {
8313 void *p, *n;
8314 p = lock_user_string(arg1);
8315 n = lock_user_string(arg2);
8316 if (p && n) {
8317 if (num == TARGET_NR_removexattr) {
8318 ret = get_errno(removexattr(p, n));
8319 } else {
8320 ret = get_errno(lremovexattr(p, n));
8321 }
8322 } else {
8323 ret = -TARGET_EFAULT;
8324 }
8325 unlock_user(p, arg1, 0);
8326 unlock_user(n, arg2, 0);
8327 }
8328 break;
8329 case TARGET_NR_fremovexattr:
8330 {
8331 void *n;
8332 n = lock_user_string(arg2);
8333 if (n) {
8334 ret = get_errno(fremovexattr(arg1, n));
8335 } else {
8336 ret = -TARGET_EFAULT;
8337 }
8338 unlock_user(n, arg2, 0);
8339 }
8340 break;
8341 #endif
8342 #endif /* CONFIG_ATTR */
8343 #ifdef TARGET_NR_set_thread_area
8344 case TARGET_NR_set_thread_area:
8345 #if defined(TARGET_MIPS)
8346 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8347 ret = 0;
8348 break;
8349 #elif defined(TARGET_CRIS)
8350 if (arg1 & 0xff)
8351 ret = -TARGET_EINVAL;
8352 else {
8353 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8354 ret = 0;
8355 }
8356 break;
8357 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8358 ret = do_set_thread_area(cpu_env, arg1);
8359 break;
8360 #else
8361 goto unimplemented_nowarn;
8362 #endif
8363 #endif
8364 #ifdef TARGET_NR_get_thread_area
8365 case TARGET_NR_get_thread_area:
8366 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8367 ret = do_get_thread_area(cpu_env, arg1);
8368 #else
8369 goto unimplemented_nowarn;
8370 #endif
8371 #endif
8372 #ifdef TARGET_NR_getdomainname
8373 case TARGET_NR_getdomainname:
8374 goto unimplemented_nowarn;
8375 #endif
8376
8377 #ifdef TARGET_NR_clock_gettime
8378 case TARGET_NR_clock_gettime:
8379 {
8380 struct timespec ts;
8381 ret = get_errno(clock_gettime(arg1, &ts));
8382 if (!is_error(ret)) {
8383 host_to_target_timespec(arg2, &ts);
8384 }
8385 break;
8386 }
8387 #endif
8388 #ifdef TARGET_NR_clock_getres
8389 case TARGET_NR_clock_getres:
8390 {
8391 struct timespec ts;
8392 ret = get_errno(clock_getres(arg1, &ts));
8393 if (!is_error(ret)) {
8394 host_to_target_timespec(arg2, &ts);
8395 }
8396 break;
8397 }
8398 #endif
8399 #ifdef TARGET_NR_clock_nanosleep
8400 case TARGET_NR_clock_nanosleep:
8401 {
8402 struct timespec ts;
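/* ts doubles as the request and, when arg4 is non-zero, the buffer
   for the remaining time, which is copied back below. */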
8403 target_to_host_timespec(&ts, arg3);
8404 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8405 if (arg4)
8406 host_to_target_timespec(arg4, &ts);
8407 break;
8408 }
8409 #endif
8410
8411 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8412 case TARGET_NR_set_tid_address:
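/* set_tid_address() only records where the kernel should clear the TID
   and do a futex wake at thread exit, so the guest address can be
   translated with g2h() and passed straight through. */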
8413 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8414 break;
8415 #endif
8416
8417 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8418 case TARGET_NR_tkill:
8419 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8420 break;
8421 #endif
8422
8423 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8424 case TARGET_NR_tgkill:
8425 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8426 target_to_host_signal(arg3)));
8427 break;
8428 #endif
8429
8430 #ifdef TARGET_NR_set_robust_list
8431 case TARGET_NR_set_robust_list:
8432 goto unimplemented_nowarn;
8433 #endif
8434
8435 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8436 case TARGET_NR_utimensat:
8437 {
8438 struct timespec *tsp, ts[2];
8439 if (!arg3) {
8440 tsp = NULL;
8441 } else {
8442 target_to_host_timespec(ts, arg3);
8443 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8444 tsp = ts;
8445 }
8446 if (!arg2)
8447 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8448 else {
8449 if (!(p = lock_user_string(arg2))) {
8450 ret = -TARGET_EFAULT;
8451 goto fail;
8452 }
8453 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8454 unlock_user(p, arg2, 0);
8455 }
8456 }
8457 break;
8458 #endif
8459 #if defined(CONFIG_USE_NPTL)
8460 case TARGET_NR_futex:
8461 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8462 break;
8463 #endif
8464 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8465 case TARGET_NR_inotify_init:
8466 ret = get_errno(sys_inotify_init());
8467 break;
8468 #endif
8469 #ifdef CONFIG_INOTIFY1
8470 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8471 case TARGET_NR_inotify_init1:
8472 ret = get_errno(sys_inotify_init1(arg1));
8473 break;
8474 #endif
8475 #endif
8476 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8477 case TARGET_NR_inotify_add_watch:
8478 p = lock_user_string(arg2);
8479 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8480 unlock_user(p, arg2, 0);
8481 break;
8482 #endif
8483 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8484 case TARGET_NR_inotify_rm_watch:
8485 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8486 break;
8487 #endif
8488
8489 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8490 case TARGET_NR_mq_open:
8491 {
8492 struct mq_attr posix_mq_attr;
8493
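/* arg1 - 1: the guest libc passes the queue name without its leading '/',
   which the host mq_open() wants back; this assumes the preceding guest
   byte really does hold that '/'. */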
8494 p = lock_user_string(arg1 - 1);
8495 if (arg4 != 0)
8496 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8497 ret = get_errno(mq_open(p, arg2, arg3, arg4 ? &posix_mq_attr : NULL));
8498 unlock_user (p, arg1, 0);
8499 }
8500 break;
8501
8502 case TARGET_NR_mq_unlink:
8503 p = lock_user_string(arg1 - 1);
8504 ret = get_errno(mq_unlink(p));
8505 unlock_user (p, arg1, 0);
8506 break;
8507
8508 case TARGET_NR_mq_timedsend:
8509 {
8510 struct timespec ts;
8511
8512 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8513 if (arg5 != 0) {
8514 target_to_host_timespec(&ts, arg5);
8515 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8516 host_to_target_timespec(arg5, &ts);
8517 }
8518 else
8519 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8520 unlock_user (p, arg2, arg3);
8521 }
8522 break;
8523
8524 case TARGET_NR_mq_timedreceive:
8525 {
8526 struct timespec ts;
8527 unsigned int prio;
8528
8529 p = lock_user (VERIFY_WRITE, arg2, arg3, 1);
8530 if (arg5 != 0) {
8531 target_to_host_timespec(&ts, arg5);
8532 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8533 host_to_target_timespec(arg5, &ts);
8534 }
8535 else
8536 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8537 unlock_user (p, arg2, arg3);
8538 if (arg4 != 0)
8539 put_user_u32(prio, arg4);
8540 }
8541 break;
8542
8543 /* Not implemented for now... */
8544 /* case TARGET_NR_mq_notify: */
8545 /* break; */
8546
8547 case TARGET_NR_mq_getsetattr:
8548 {
8549 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8550 ret = 0;
8551 if (arg3 != 0) {
8552 ret = mq_getattr(arg1, &posix_mq_attr_out);
8553 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8554 }
8555 if (arg2 != 0) {
8556 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8557 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8558 }
8559
8560 }
8561 break;
8562 #endif
8563
8564 #ifdef CONFIG_SPLICE
8565 #ifdef TARGET_NR_tee
8566 case TARGET_NR_tee:
8567 {
8568 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8569 }
8570 break;
8571 #endif
8572 #ifdef TARGET_NR_splice
8573 case TARGET_NR_splice:
8574 {
8575 loff_t loff_in, loff_out;
8576 loff_t *ploff_in = NULL, *ploff_out = NULL;
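/* A non-zero arg2/arg4 points at a 64-bit offset for splice() to use and
   update; note the updated offsets are not copied back to the guest here. */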
8577 if(arg2) {
8578 get_user_u64(loff_in, arg2);
8579 ploff_in = &loff_in;
8580 }
8581 if(arg4) {
8582 get_user_u64(loff_out, arg4);
8583 ploff_out = &loff_out;
8584 }
8585 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8586 }
8587 break;
8588 #endif
8589 #ifdef TARGET_NR_vmsplice
8590 case TARGET_NR_vmsplice:
8591 {
8592 int count = arg3;
8593 struct iovec *vec;
8594
8595 vec = alloca(count * sizeof(struct iovec));
8596 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
8597 goto efault;
8598 ret = get_errno(vmsplice(arg1, vec, count, arg4));
8599 unlock_iovec(vec, arg2, count, 0);
8600 }
8601 break;
8602 #endif
8603 #endif /* CONFIG_SPLICE */
8604 #ifdef CONFIG_EVENTFD
8605 #if defined(TARGET_NR_eventfd)
8606 case TARGET_NR_eventfd:
8607 ret = get_errno(eventfd(arg1, 0));
8608 break;
8609 #endif
8610 #if defined(TARGET_NR_eventfd2)
8611 case TARGET_NR_eventfd2:
8612 ret = get_errno(eventfd(arg1, arg2));
8613 break;
8614 #endif
8615 #endif /* CONFIG_EVENTFD */
8616 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8617 case TARGET_NR_fallocate:
8618 #if TARGET_ABI_BITS == 32
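/* On 32-bit ABIs the 64-bit offset and length each arrive as two 32-bit
   halves; target_offset64() reassembles them. */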
8619 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8620 target_offset64(arg5, arg6)));
8621 #else
8622 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8623 #endif
8624 break;
8625 #endif
8626 #if defined(CONFIG_SYNC_FILE_RANGE)
8627 #if defined(TARGET_NR_sync_file_range)
8628 case TARGET_NR_sync_file_range:
8629 #if TARGET_ABI_BITS == 32
8630 #if defined(TARGET_MIPS)
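/* The MIPS ABI inserts a padding argument before 64-bit values, so the
   offsets start at arg3/arg4 and the flags end up in arg7. */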
8631 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8632 target_offset64(arg5, arg6), arg7));
8633 #else
8634 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8635 target_offset64(arg4, arg5), arg6));
8636 #endif /* !TARGET_MIPS */
8637 #else
8638 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8639 #endif
8640 break;
8641 #endif
8642 #if defined(TARGET_NR_sync_file_range2)
8643 case TARGET_NR_sync_file_range2:
8644 /* This is like sync_file_range but the arguments are reordered */
8645 #if TARGET_ABI_BITS == 32
8646 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8647 target_offset64(arg5, arg6), arg2));
8648 #else
8649 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8650 #endif
8651 break;
8652 #endif
8653 #endif
8654 #if defined(CONFIG_EPOLL)
8655 #if defined(TARGET_NR_epoll_create)
8656 case TARGET_NR_epoll_create:
8657 ret = get_errno(epoll_create(arg1));
8658 break;
8659 #endif
8660 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8661 case TARGET_NR_epoll_create1:
8662 ret = get_errno(epoll_create1(arg1));
8663 break;
8664 #endif
8665 #if defined(TARGET_NR_epoll_ctl)
8666 case TARGET_NR_epoll_ctl:
8667 {
8668 struct epoll_event ep;
8669 struct epoll_event *epp = 0;
8670 if (arg4) {
8671 struct target_epoll_event *target_ep;
8672 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8673 goto efault;
8674 }
8675 ep.events = tswap32(target_ep->events);
8676 /* The epoll_data_t union is just opaque data to the kernel,
8677 * so we transfer all 64 bits across and need not worry what
8678 * actual data type it is.
8679 */
8680 ep.data.u64 = tswap64(target_ep->data.u64);
8681 unlock_user_struct(target_ep, arg4, 0);
8682 epp = &ep;
8683 }
8684 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8685 break;
8686 }
8687 #endif
8688
8689 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8690 #define IMPLEMENT_EPOLL_PWAIT
8691 #endif
8692 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8693 #if defined(TARGET_NR_epoll_wait)
8694 case TARGET_NR_epoll_wait:
8695 #endif
8696 #if defined(IMPLEMENT_EPOLL_PWAIT)
8697 case TARGET_NR_epoll_pwait:
8698 #endif
8699 {
8700 struct target_epoll_event *target_ep;
8701 struct epoll_event *ep;
8702 int epfd = arg1;
8703 int maxevents = arg3;
8704 int timeout = arg4;
8705
8706 target_ep = lock_user(VERIFY_WRITE, arg2,
8707 maxevents * sizeof(struct target_epoll_event), 1);
8708 if (!target_ep) {
8709 goto efault;
8710 }
8711
8712 ep = alloca(maxevents * sizeof(struct epoll_event));
8713
8714 switch (num) {
8715 #if defined(IMPLEMENT_EPOLL_PWAIT)
8716 case TARGET_NR_epoll_pwait:
8717 {
8718 target_sigset_t *target_set;
8719 sigset_t _set, *set = &_set;
8720
8721 if (arg5) {
8722 target_set = lock_user(VERIFY_READ, arg5,
8723 sizeof(target_sigset_t), 1);
8724 if (!target_set) {
8725 unlock_user(target_ep, arg2, 0);
8726 goto efault;
8727 }
8728 target_to_host_sigset(set, target_set);
8729 unlock_user(target_set, arg5, 0);
8730 } else {
8731 set = NULL;
8732 }
8733
8734 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8735 break;
8736 }
8737 #endif
8738 #if defined(TARGET_NR_epoll_wait)
8739 case TARGET_NR_epoll_wait:
8740 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8741 break;
8742 #endif
8743 default:
8744 ret = -TARGET_ENOSYS;
8745 }
8746 if (!is_error(ret)) {
8747 int i;
8748 for (i = 0; i < ret; i++) {
8749 target_ep[i].events = tswap32(ep[i].events);
8750 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8751 }
8752 }
8753 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8754 break;
8755 }
8756 #endif
8757 #endif
8758 #ifdef TARGET_NR_prlimit64
8759 case TARGET_NR_prlimit64:
8760 {
8761 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8762 struct target_rlimit64 *target_rnew, *target_rold;
8763 struct host_rlimit64 rnew, rold, *rnewp = 0;
8764 if (arg3) {
8765 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8766 goto efault;
8767 }
8768 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8769 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8770 unlock_user_struct(target_rnew, arg3, 0);
8771 rnewp = &rnew;
8772 }
8773
8774 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8775 if (!is_error(ret) && arg4) {
8776 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8777 goto efault;
8778 }
8779 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8780 target_rold->rlim_max = tswap64(rold.rlim_max);
8781 unlock_user_struct(target_rold, arg4, 1);
8782 }
8783 break;
8784 }
8785 #endif
8786 default:
8787 unimplemented:
8788 gemu_log("qemu: Unsupported syscall: %d\n", num);
8789 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8790 unimplemented_nowarn:
8791 #endif
8792 ret = -TARGET_ENOSYS;
8793 break;
8794 }
8795 fail:
8796 #ifdef DEBUG
8797 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8798 #endif
8799 if(do_strace)
8800 print_syscall_ret(num, ret);
8801 return ret;
8802 efault:
8803 ret = -TARGET_EFAULT;
8804 goto fail;
8805 }