linux-user/syscall.c
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include "qemu-common.h"
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
73 #ifdef CONFIG_ATTR
74 #include "qemu-xattr.h"
75 #endif
76
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
83
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/utsname.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include "linux_loop.h"
99 #include "cpu-uname.h"
100
101 #include "qemu.h"
102
103 #if defined(CONFIG_USE_NPTL)
104 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
105 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
106 #else
107 /* XXX: Hardcode the above values. */
108 #define CLONE_NPTL_FLAGS2 0
109 #endif
110
111 //#define DEBUG
112
113 //#include <linux/msdos_fs.h>
114 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
115 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
116
117
118 #undef _syscall0
119 #undef _syscall1
120 #undef _syscall2
121 #undef _syscall3
122 #undef _syscall4
123 #undef _syscall5
124 #undef _syscall6
125
126 #define _syscall0(type,name) \
127 static type name (void) \
128 { \
129 return syscall(__NR_##name); \
130 }
131
132 #define _syscall1(type,name,type1,arg1) \
133 static type name (type1 arg1) \
134 { \
135 return syscall(__NR_##name, arg1); \
136 }
137
138 #define _syscall2(type,name,type1,arg1,type2,arg2) \
139 static type name (type1 arg1,type2 arg2) \
140 { \
141 return syscall(__NR_##name, arg1, arg2); \
142 }
143
144 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
145 static type name (type1 arg1,type2 arg2,type3 arg3) \
146 { \
147 return syscall(__NR_##name, arg1, arg2, arg3); \
148 }
149
150 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
151 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
152 { \
153 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
154 }
155
156 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
157 type5,arg5) \
158 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
159 { \
160 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
161 }
162
163
164 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
165 type5,arg5,type6,arg6) \
166 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
167 type6 arg6) \
168 { \
169 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
170 }
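/*
 * Annotation (not part of the original source): the _syscallN macros
 * above generate thin static wrappers around the raw syscall(2)
 * interface, which is useful when glibc provides no wrapper of its own.
 * For example, the later use
 *
 *     _syscall2(int, sys_tkill, int, tid, int, sig)
 *
 * expands to roughly
 *
 *     static int sys_tkill (int tid, int sig)
 *     {
 *         return syscall(__NR_sys_tkill, tid, sig);
 *     }
 *
 * where __NR_sys_tkill is #defined to __NR_tkill below, so the wrapper
 * issues the host's tkill syscall directly and reports errors the usual
 * way (-1 with errno set).
 */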
171
172
173 #define __NR_sys_uname __NR_uname
174 #define __NR_sys_faccessat __NR_faccessat
175 #define __NR_sys_fchmodat __NR_fchmodat
176 #define __NR_sys_fchownat __NR_fchownat
177 #define __NR_sys_fstatat64 __NR_fstatat64
178 #define __NR_sys_futimesat __NR_futimesat
179 #define __NR_sys_getcwd1 __NR_getcwd
180 #define __NR_sys_getdents __NR_getdents
181 #define __NR_sys_getdents64 __NR_getdents64
182 #define __NR_sys_getpriority __NR_getpriority
183 #define __NR_sys_linkat __NR_linkat
184 #define __NR_sys_mkdirat __NR_mkdirat
185 #define __NR_sys_mknodat __NR_mknodat
186 #define __NR_sys_newfstatat __NR_newfstatat
187 #define __NR_sys_openat __NR_openat
188 #define __NR_sys_readlinkat __NR_readlinkat
189 #define __NR_sys_renameat __NR_renameat
190 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
191 #define __NR_sys_symlinkat __NR_symlinkat
192 #define __NR_sys_syslog __NR_syslog
193 #define __NR_sys_tgkill __NR_tgkill
194 #define __NR_sys_tkill __NR_tkill
195 #define __NR_sys_unlinkat __NR_unlinkat
196 #define __NR_sys_utimensat __NR_utimensat
197 #define __NR_sys_futex __NR_futex
198 #define __NR_sys_inotify_init __NR_inotify_init
199 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
200 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
201
202 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
203 defined(__s390x__)
204 #define __NR__llseek __NR_lseek
205 #endif
206
207 #ifdef __NR_gettid
208 _syscall0(int, gettid)
209 #else
210 /* This is a replacement for the host gettid() and must return a host
211 errno. */
212 static int gettid(void) {
213 return -ENOSYS;
214 }
215 #endif
216 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
217 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
218 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
219 #endif
220 _syscall2(int, sys_getpriority, int, which, int, who);
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
223 loff_t *, res, uint, wh);
224 #endif
225 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
226 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
227 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
228 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
229 #endif
230 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
231 _syscall2(int,sys_tkill,int,tid,int,sig)
232 #endif
233 #ifdef __NR_exit_group
234 _syscall1(int,exit_group,int,error_code)
235 #endif
236 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
237 _syscall1(int,set_tid_address,int *,tidptr)
238 #endif
239 #if defined(CONFIG_USE_NPTL)
240 #if defined(TARGET_NR_futex) && defined(__NR_futex)
241 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
242 const struct timespec *,timeout,int *,uaddr2,int,val3)
243 #endif
244 #endif
245 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
246 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
247 unsigned long *, user_mask_ptr);
248 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
249 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
250 unsigned long *, user_mask_ptr);
251 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
252 void *, arg);
253
254 static bitmask_transtbl fcntl_flags_tbl[] = {
255 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
256 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
257 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
258 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
259 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
260 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
261 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
262 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
263 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
264 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
265 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
266 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
267 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
268 #if defined(O_DIRECT)
269 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
270 #endif
271 { 0, 0, 0, 0 }
272 };
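/*
 * Annotation (not part of the original source): each row of this
 * bitmask_transtbl is, conceptually, { target_mask, target_bits,
 * host_mask, host_bits } (the real field names in QEMU's thunk header
 * are historical and differ).  QEMU's generic helpers
 * target_to_host_bitmask()/host_to_target_bitmask() walk such a table
 * to translate flag words between ABIs whose bit encodings are not
 * numerically identical (O_NONBLOCK, O_DIRECTORY and friends vary by
 * architecture).  A minimal sketch of the target-to-host direction:
 *
 *     unsigned int host_flags = 0;
 *     const bitmask_transtbl *e;
 *     for (e = fcntl_flags_tbl; e->target_mask != 0; e++) {
 *         if ((target_flags & e->target_mask) == e->target_bits)
 *             host_flags |= e->host_bits;
 *     }
 *
 * The all-zero row above acts as the table terminator.
 */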
273
274 #define COPY_UTSNAME_FIELD(dest, src) \
275 do { \
276 /* __NEW_UTS_LEN doesn't include terminating null */ \
277 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
278 (dest)[__NEW_UTS_LEN] = '\0'; \
279 } while (0)
280
281 static int sys_uname(struct new_utsname *buf)
282 {
283 struct utsname uts_buf;
284
285 if (uname(&uts_buf) < 0)
286 return (-1);
287
288 /*
289 * Just in case these have some differences, we
290 * translate utsname to new_utsname (which is the
 291  * struct the Linux kernel uses).
292 */
293
294 memset(buf, 0, sizeof(*buf));
295 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
296 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
297 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
298 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
299 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
300 #ifdef _GNU_SOURCE
301 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
302 #endif
303 return (0);
304
305 #undef COPY_UTSNAME_FIELD
306 }
307
308 static int sys_getcwd1(char *buf, size_t size)
309 {
310 if (getcwd(buf, size) == NULL) {
311 /* getcwd() sets errno */
312 return (-1);
313 }
314 return strlen(buf)+1;
315 }
316
317 #ifdef CONFIG_ATFILE
318 /*
 319  * The host system seems to have the atfile syscall stubs available. We
 320  * now enable them one by one, as specified by the target's syscall_nr.h.
321 */
322
323 #ifdef TARGET_NR_faccessat
324 static int sys_faccessat(int dirfd, const char *pathname, int mode)
325 {
326 return (faccessat(dirfd, pathname, mode, 0));
327 }
328 #endif
329 #ifdef TARGET_NR_fchmodat
330 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
331 {
332 return (fchmodat(dirfd, pathname, mode, 0));
333 }
334 #endif
335 #if defined(TARGET_NR_fchownat)
336 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
337 gid_t group, int flags)
338 {
339 return (fchownat(dirfd, pathname, owner, group, flags));
340 }
341 #endif
342 #ifdef __NR_fstatat64
343 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
344 int flags)
345 {
346 return (fstatat(dirfd, pathname, buf, flags));
347 }
348 #endif
349 #ifdef __NR_newfstatat
350 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
351 int flags)
352 {
353 return (fstatat(dirfd, pathname, buf, flags));
354 }
355 #endif
356 #ifdef TARGET_NR_futimesat
357 static int sys_futimesat(int dirfd, const char *pathname,
358 const struct timeval times[2])
359 {
360 return (futimesat(dirfd, pathname, times));
361 }
362 #endif
363 #ifdef TARGET_NR_linkat
364 static int sys_linkat(int olddirfd, const char *oldpath,
365 int newdirfd, const char *newpath, int flags)
366 {
367 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
368 }
369 #endif
370 #ifdef TARGET_NR_mkdirat
371 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
372 {
373 return (mkdirat(dirfd, pathname, mode));
374 }
375 #endif
376 #ifdef TARGET_NR_mknodat
377 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
378 dev_t dev)
379 {
380 return (mknodat(dirfd, pathname, mode, dev));
381 }
382 #endif
383 #ifdef TARGET_NR_openat
384 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
385 {
386 /*
 387      * open(2) takes an extra 'mode' parameter when it is called
 388      * with the O_CREAT flag.
389 */
390 if ((flags & O_CREAT) != 0) {
391 return (openat(dirfd, pathname, flags, mode));
392 }
393 return (openat(dirfd, pathname, flags));
394 }
395 #endif
396 #ifdef TARGET_NR_readlinkat
397 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
398 {
399 return (readlinkat(dirfd, pathname, buf, bufsiz));
400 }
401 #endif
402 #ifdef TARGET_NR_renameat
403 static int sys_renameat(int olddirfd, const char *oldpath,
404 int newdirfd, const char *newpath)
405 {
406 return (renameat(olddirfd, oldpath, newdirfd, newpath));
407 }
408 #endif
409 #ifdef TARGET_NR_symlinkat
410 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
411 {
412 return (symlinkat(oldpath, newdirfd, newpath));
413 }
414 #endif
415 #ifdef TARGET_NR_unlinkat
416 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
417 {
418 return (unlinkat(dirfd, pathname, flags));
419 }
420 #endif
421 #else /* !CONFIG_ATFILE */
422
423 /*
424 * Try direct syscalls instead
425 */
426 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
427 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
428 #endif
429 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
430 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
431 #endif
432 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
433 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
434 uid_t,owner,gid_t,group,int,flags)
435 #endif
436 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
437 defined(__NR_fstatat64)
438 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
439 struct stat *,buf,int,flags)
440 #endif
441 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
442 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
443 const struct timeval *,times)
444 #endif
445 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
446 defined(__NR_newfstatat)
447 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
448 struct stat *,buf,int,flags)
449 #endif
450 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
451 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
452 int,newdirfd,const char *,newpath,int,flags)
453 #endif
454 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
455 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
456 #endif
457 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
458 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
459 mode_t,mode,dev_t,dev)
460 #endif
461 #if defined(TARGET_NR_openat) && defined(__NR_openat)
462 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
463 #endif
464 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
465 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
466 char *,buf,size_t,bufsize)
467 #endif
468 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
469 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
470 int,newdirfd,const char *,newpath)
471 #endif
472 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
473 _syscall3(int,sys_symlinkat,const char *,oldpath,
474 int,newdirfd,const char *,newpath)
475 #endif
476 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
477 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
478 #endif
479
480 #endif /* CONFIG_ATFILE */
481
482 #ifdef CONFIG_UTIMENSAT
483 static int sys_utimensat(int dirfd, const char *pathname,
484 const struct timespec times[2], int flags)
485 {
486 if (pathname == NULL)
487 return futimens(dirfd, times);
488 else
489 return utimensat(dirfd, pathname, times, flags);
490 }
491 #else
492 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
493 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
494 const struct timespec *,tsp,int,flags)
495 #endif
496 #endif /* CONFIG_UTIMENSAT */
497
498 #ifdef CONFIG_INOTIFY
499 #include <sys/inotify.h>
500
501 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
502 static int sys_inotify_init(void)
503 {
504 return (inotify_init());
505 }
506 #endif
507 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
508 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
509 {
510 return (inotify_add_watch(fd, pathname, mask));
511 }
512 #endif
513 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
514 static int sys_inotify_rm_watch(int fd, int32_t wd)
515 {
516 return (inotify_rm_watch(fd, wd));
517 }
518 #endif
519 #ifdef CONFIG_INOTIFY1
520 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
521 static int sys_inotify_init1(int flags)
522 {
523 return (inotify_init1(flags));
524 }
525 #endif
526 #endif
527 #else
 528 /* Userspace can usually get by at runtime without inotify */
529 #undef TARGET_NR_inotify_init
530 #undef TARGET_NR_inotify_init1
531 #undef TARGET_NR_inotify_add_watch
532 #undef TARGET_NR_inotify_rm_watch
533 #endif /* CONFIG_INOTIFY */
534
535 #if defined(TARGET_NR_ppoll)
536 #ifndef __NR_ppoll
537 # define __NR_ppoll -1
538 #endif
539 #define __NR_sys_ppoll __NR_ppoll
540 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
541 struct timespec *, timeout, const __sigset_t *, sigmask,
542 size_t, sigsetsize)
543 #endif
544
545 #if defined(TARGET_NR_pselect6)
546 #ifndef __NR_pselect6
547 # define __NR_pselect6 -1
548 #endif
549 #define __NR_sys_pselect6 __NR_pselect6
550 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
551 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
552 #endif
553
554 #if defined(TARGET_NR_prlimit64)
555 #ifndef __NR_prlimit64
556 # define __NR_prlimit64 -1
557 #endif
558 #define __NR_sys_prlimit64 __NR_prlimit64
 559 /* The glibc rlimit structure may not be the one used by the underlying syscall */
560 struct host_rlimit64 {
561 uint64_t rlim_cur;
562 uint64_t rlim_max;
563 };
564 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
565 const struct host_rlimit64 *, new_limit,
566 struct host_rlimit64 *, old_limit)
567 #endif
568
569 extern int personality(int);
570 extern int flock(int, int);
571 extern int setfsuid(int);
572 extern int setfsgid(int);
573 extern int setgroups(int, gid_t *);
574
 575 /* ARM EABI and MIPS expect 64-bit types to be aligned on even pairs of registers */
576 #ifdef TARGET_ARM
577 static inline int regpairs_aligned(void *cpu_env) {
578 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
579 }
580 #elif defined(TARGET_MIPS)
581 static inline int regpairs_aligned(void *cpu_env) { return 1; }
582 #else
583 static inline int regpairs_aligned(void *cpu_env) { return 0; }
584 #endif
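/*
 * Annotation (not part of the original source): on the ABIs flagged by
 * regpairs_aligned(), a 64-bit syscall argument must start in an
 * even-numbered register, so a padding slot is inserted whenever the
 * preceding arguments would leave it at an odd position.  For example,
 * pread64(fd, buf, count, offset) on ARM EABI passes the 64-bit offset
 * in r4/r5 and leaves r3 unused.  The handlers for such syscalls
 * consult this predicate to know whether to skip the padding argument
 * before reassembling the value from its two 32-bit halves.
 */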
585
586 #define ERRNO_TABLE_SIZE 1200
587
588 /* target_to_host_errno_table[] is initialized from
589 * host_to_target_errno_table[] in syscall_init(). */
590 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
591 };
592
593 /*
594 * This list is the union of errno values overridden in asm-<arch>/errno.h
595 * minus the errnos that are not actually generic to all archs.
596 */
597 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
598 [EIDRM] = TARGET_EIDRM,
599 [ECHRNG] = TARGET_ECHRNG,
600 [EL2NSYNC] = TARGET_EL2NSYNC,
601 [EL3HLT] = TARGET_EL3HLT,
602 [EL3RST] = TARGET_EL3RST,
603 [ELNRNG] = TARGET_ELNRNG,
604 [EUNATCH] = TARGET_EUNATCH,
605 [ENOCSI] = TARGET_ENOCSI,
606 [EL2HLT] = TARGET_EL2HLT,
607 [EDEADLK] = TARGET_EDEADLK,
608 [ENOLCK] = TARGET_ENOLCK,
609 [EBADE] = TARGET_EBADE,
610 [EBADR] = TARGET_EBADR,
611 [EXFULL] = TARGET_EXFULL,
612 [ENOANO] = TARGET_ENOANO,
613 [EBADRQC] = TARGET_EBADRQC,
614 [EBADSLT] = TARGET_EBADSLT,
615 [EBFONT] = TARGET_EBFONT,
616 [ENOSTR] = TARGET_ENOSTR,
617 [ENODATA] = TARGET_ENODATA,
618 [ETIME] = TARGET_ETIME,
619 [ENOSR] = TARGET_ENOSR,
620 [ENONET] = TARGET_ENONET,
621 [ENOPKG] = TARGET_ENOPKG,
622 [EREMOTE] = TARGET_EREMOTE,
623 [ENOLINK] = TARGET_ENOLINK,
624 [EADV] = TARGET_EADV,
625 [ESRMNT] = TARGET_ESRMNT,
626 [ECOMM] = TARGET_ECOMM,
627 [EPROTO] = TARGET_EPROTO,
628 [EDOTDOT] = TARGET_EDOTDOT,
629 [EMULTIHOP] = TARGET_EMULTIHOP,
630 [EBADMSG] = TARGET_EBADMSG,
631 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
632 [EOVERFLOW] = TARGET_EOVERFLOW,
633 [ENOTUNIQ] = TARGET_ENOTUNIQ,
634 [EBADFD] = TARGET_EBADFD,
635 [EREMCHG] = TARGET_EREMCHG,
636 [ELIBACC] = TARGET_ELIBACC,
637 [ELIBBAD] = TARGET_ELIBBAD,
638 [ELIBSCN] = TARGET_ELIBSCN,
639 [ELIBMAX] = TARGET_ELIBMAX,
640 [ELIBEXEC] = TARGET_ELIBEXEC,
641 [EILSEQ] = TARGET_EILSEQ,
642 [ENOSYS] = TARGET_ENOSYS,
643 [ELOOP] = TARGET_ELOOP,
644 [ERESTART] = TARGET_ERESTART,
645 [ESTRPIPE] = TARGET_ESTRPIPE,
646 [ENOTEMPTY] = TARGET_ENOTEMPTY,
647 [EUSERS] = TARGET_EUSERS,
648 [ENOTSOCK] = TARGET_ENOTSOCK,
649 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
650 [EMSGSIZE] = TARGET_EMSGSIZE,
651 [EPROTOTYPE] = TARGET_EPROTOTYPE,
652 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
653 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
654 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
655 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
656 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
657 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
658 [EADDRINUSE] = TARGET_EADDRINUSE,
659 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
660 [ENETDOWN] = TARGET_ENETDOWN,
661 [ENETUNREACH] = TARGET_ENETUNREACH,
662 [ENETRESET] = TARGET_ENETRESET,
663 [ECONNABORTED] = TARGET_ECONNABORTED,
664 [ECONNRESET] = TARGET_ECONNRESET,
665 [ENOBUFS] = TARGET_ENOBUFS,
666 [EISCONN] = TARGET_EISCONN,
667 [ENOTCONN] = TARGET_ENOTCONN,
668 [EUCLEAN] = TARGET_EUCLEAN,
669 [ENOTNAM] = TARGET_ENOTNAM,
670 [ENAVAIL] = TARGET_ENAVAIL,
671 [EISNAM] = TARGET_EISNAM,
672 [EREMOTEIO] = TARGET_EREMOTEIO,
673 [ESHUTDOWN] = TARGET_ESHUTDOWN,
674 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
675 [ETIMEDOUT] = TARGET_ETIMEDOUT,
676 [ECONNREFUSED] = TARGET_ECONNREFUSED,
677 [EHOSTDOWN] = TARGET_EHOSTDOWN,
678 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
679 [EALREADY] = TARGET_EALREADY,
680 [EINPROGRESS] = TARGET_EINPROGRESS,
681 [ESTALE] = TARGET_ESTALE,
682 [ECANCELED] = TARGET_ECANCELED,
683 [ENOMEDIUM] = TARGET_ENOMEDIUM,
684 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
685 #ifdef ENOKEY
686 [ENOKEY] = TARGET_ENOKEY,
687 #endif
688 #ifdef EKEYEXPIRED
689 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
690 #endif
691 #ifdef EKEYREVOKED
692 [EKEYREVOKED] = TARGET_EKEYREVOKED,
693 #endif
694 #ifdef EKEYREJECTED
695 [EKEYREJECTED] = TARGET_EKEYREJECTED,
696 #endif
697 #ifdef EOWNERDEAD
698 [EOWNERDEAD] = TARGET_EOWNERDEAD,
699 #endif
700 #ifdef ENOTRECOVERABLE
701 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
702 #endif
703 };
704
705 static inline int host_to_target_errno(int err)
706 {
707 if(host_to_target_errno_table[err])
708 return host_to_target_errno_table[err];
709 return err;
710 }
711
712 static inline int target_to_host_errno(int err)
713 {
714 if (target_to_host_errno_table[err])
715 return target_to_host_errno_table[err];
716 return err;
717 }
718
719 static inline abi_long get_errno(abi_long ret)
720 {
721 if (ret == -1)
722 return -host_to_target_errno(errno);
723 else
724 return ret;
725 }
726
727 static inline int is_error(abi_long ret)
728 {
729 return (abi_ulong)ret >= (abi_ulong)(-4096);
730 }
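/*
 * Annotation (not part of the original source): the usual pattern for
 * calling into the host throughout this file is
 *
 *     ret = get_errno(some_host_call(...));
 *     if (is_error(ret))
 *         return ret;
 *
 * get_errno() folds the C library's "-1 plus errno" convention into the
 * kernel-style negative-errno convention, translating the value through
 * host_to_target_errno() on the way, and is_error() mirrors the kernel
 * rule that return values in the range [-4096, -1] denote errors, so a
 * failed call propagates to the guest as a negative target errno.
 */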
731
732 char *target_strerror(int err)
733 {
734 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
735 return NULL;
736 }
737 return strerror(target_to_host_errno(err));
738 }
739
740 static abi_ulong target_brk;
741 static abi_ulong target_original_brk;
742 static abi_ulong brk_page;
743
744 void target_set_brk(abi_ulong new_brk)
745 {
746 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
747 brk_page = HOST_PAGE_ALIGN(target_brk);
748 }
749
750 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
751 #define DEBUGF_BRK(message, args...)
752
753 /* do_brk() must return target values and target errnos. */
754 abi_long do_brk(abi_ulong new_brk)
755 {
756 abi_long mapped_addr;
757 int new_alloc_size;
758
759 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
760
761 if (!new_brk) {
762 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
763 return target_brk;
764 }
765 if (new_brk < target_original_brk) {
766 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
767 target_brk);
768 return target_brk;
769 }
770
 771     /* If the new brk is less than the highest page reserved for the
772 * target heap allocation, set it and we're almost done... */
773 if (new_brk <= brk_page) {
774 /* Heap contents are initialized to zero, as for anonymous
775 * mapped pages. */
776 if (new_brk > target_brk) {
777 memset(g2h(target_brk), 0, new_brk - target_brk);
778 }
779 target_brk = new_brk;
780 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
781 return target_brk;
782 }
783
784 /* We need to allocate more memory after the brk... Note that
785 * we don't use MAP_FIXED because that will map over the top of
786 * any existing mapping (like the one with the host libc or qemu
787 * itself); instead we treat "mapped but at wrong address" as
788 * a failure and unmap again.
789 */
790 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
791 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
792 PROT_READ|PROT_WRITE,
793 MAP_ANON|MAP_PRIVATE, 0, 0));
794
795 if (mapped_addr == brk_page) {
796 /* Heap contents are initialized to zero, as for anonymous
797 * mapped pages. Technically the new pages are already
798 * initialized to zero since they *are* anonymous mapped
799 * pages, however we have to take care with the contents that
800 * come from the remaining part of the previous page: it may
 801          * contain garbage data due to a previous heap use (grown
 802          * then shrunk). */
803 memset(g2h(target_brk), 0, brk_page - target_brk);
804
805 target_brk = new_brk;
806 brk_page = HOST_PAGE_ALIGN(target_brk);
807 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
808 target_brk);
809 return target_brk;
810 } else if (mapped_addr != -1) {
811 /* Mapped but at wrong address, meaning there wasn't actually
812 * enough space for this brk.
813 */
814 target_munmap(mapped_addr, new_alloc_size);
815 mapped_addr = -1;
816 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
817 }
818 else {
819 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
820 }
821
822 #if defined(TARGET_ALPHA)
823 /* We (partially) emulate OSF/1 on Alpha, which requires we
824 return a proper errno, not an unchanged brk value. */
825 return -TARGET_ENOMEM;
826 #endif
827 /* For everything else, return the previous break. */
828 return target_brk;
829 }
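/*
 * Annotation (not part of the original source): a worked example of the
 * cases above, assuming 4 KiB host pages.  If the initial break is
 * 0x10000, target_set_brk() leaves target_brk and brk_page at 0x10000.
 * A first brk(0x10400) exceeds brk_page, so one page is requested from
 * target_mmap() at 0x10000 without MAP_FIXED; if the kernel places it
 * exactly there, target_brk becomes 0x10400 and brk_page 0x11000.  A
 * later brk(0x10800) then fits below brk_page, so only the bytes from
 * 0x10400 to 0x10800 are zeroed and target_brk moves.  If the mapping
 * lands anywhere else it is unmapped again and the old break (or
 * -TARGET_ENOMEM on Alpha) is returned.
 */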
830
831 static inline abi_long copy_from_user_fdset(fd_set *fds,
832 abi_ulong target_fds_addr,
833 int n)
834 {
835 int i, nw, j, k;
836 abi_ulong b, *target_fds;
837
838 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
839 if (!(target_fds = lock_user(VERIFY_READ,
840 target_fds_addr,
841 sizeof(abi_ulong) * nw,
842 1)))
843 return -TARGET_EFAULT;
844
845 FD_ZERO(fds);
846 k = 0;
847 for (i = 0; i < nw; i++) {
848 /* grab the abi_ulong */
849 __get_user(b, &target_fds[i]);
850 for (j = 0; j < TARGET_ABI_BITS; j++) {
851 /* check the bit inside the abi_ulong */
852 if ((b >> j) & 1)
853 FD_SET(k, fds);
854 k++;
855 }
856 }
857
858 unlock_user(target_fds, target_fds_addr, 0);
859
860 return 0;
861 }
862
863 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
864 abi_ulong target_fds_addr,
865 int n)
866 {
867 if (target_fds_addr) {
868 if (copy_from_user_fdset(fds, target_fds_addr, n))
869 return -TARGET_EFAULT;
870 *fds_ptr = fds;
871 } else {
872 *fds_ptr = NULL;
873 }
874 return 0;
875 }
876
877 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
878 const fd_set *fds,
879 int n)
880 {
881 int i, nw, j, k;
882 abi_long v;
883 abi_ulong *target_fds;
884
885 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
886 if (!(target_fds = lock_user(VERIFY_WRITE,
887 target_fds_addr,
888 sizeof(abi_ulong) * nw,
889 0)))
890 return -TARGET_EFAULT;
891
892 k = 0;
893 for (i = 0; i < nw; i++) {
894 v = 0;
895 for (j = 0; j < TARGET_ABI_BITS; j++) {
896 v |= ((FD_ISSET(k, fds) != 0) << j);
897 k++;
898 }
899 __put_user(v, &target_fds[i]);
900 }
901
902 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
903
904 return 0;
905 }
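/*
 * Annotation (not part of the original source): both fdset helpers
 * treat the target fd_set as an array of abi_ulong words filled from
 * the least significant bit upwards.  With TARGET_ABI_BITS == 32,
 * descriptor 3 lives in bit 3 of word 0 and descriptor 35 in bit 3 of
 * word 1, so for n descriptors nw = (n + 31) / 32 words are locked and
 * transferred, with byte order handled by __get_user()/__put_user() on
 * each whole word.
 */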
906
907 #if defined(__alpha__)
908 #define HOST_HZ 1024
909 #else
910 #define HOST_HZ 100
911 #endif
912
913 static inline abi_long host_to_target_clock_t(long ticks)
914 {
915 #if HOST_HZ == TARGET_HZ
916 return ticks;
917 #else
918 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
919 #endif
920 }
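/*
 * Annotation (not part of the original source): clock_t values returned
 * by calls such as times(2) are rescaled from the host tick rate to the
 * target's.  For instance, on an Alpha host (HOST_HZ == 1024) emulating
 * a target with TARGET_HZ == 100, 2048 host ticks become
 * 2048 * 100 / 1024 = 200 target ticks; when the two rates match, the
 * value passes through unchanged.
 */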
921
922 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
923 const struct rusage *rusage)
924 {
925 struct target_rusage *target_rusage;
926
927 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
928 return -TARGET_EFAULT;
929 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
930 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
931 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
932 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
933 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
934 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
935 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
936 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
937 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
938 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
939 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
940 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
941 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
942 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
943 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
944 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
945 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
946 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
947 unlock_user_struct(target_rusage, target_addr, 1);
948
949 return 0;
950 }
951
952 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
953 {
954 abi_ulong target_rlim_swap;
955 rlim_t result;
956
957 target_rlim_swap = tswapal(target_rlim);
958 if (target_rlim_swap == TARGET_RLIM_INFINITY)
959 return RLIM_INFINITY;
960
961 result = target_rlim_swap;
962 if (target_rlim_swap != (rlim_t)result)
963 return RLIM_INFINITY;
964
965 return result;
966 }
967
968 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
969 {
970 abi_ulong target_rlim_swap;
971 abi_ulong result;
972
973 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
974 target_rlim_swap = TARGET_RLIM_INFINITY;
975 else
976 target_rlim_swap = rlim;
977 result = tswapal(target_rlim_swap);
978
979 return result;
980 }
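/*
 * Annotation (not part of the original source): these two helpers guard
 * against width mismatches between abi_ulong and the host rlim_t.  On
 * most 32-bit targets TARGET_RLIM_INFINITY is an all-ones abi_ulong; it
 * maps to the host's (typically 64-bit) RLIM_INFINITY, and conversely a
 * host limit too large to represent in abi_ulong is reported back to
 * the target as infinity rather than being silently truncated.
 */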
981
982 static inline int target_to_host_resource(int code)
983 {
984 switch (code) {
985 case TARGET_RLIMIT_AS:
986 return RLIMIT_AS;
987 case TARGET_RLIMIT_CORE:
988 return RLIMIT_CORE;
989 case TARGET_RLIMIT_CPU:
990 return RLIMIT_CPU;
991 case TARGET_RLIMIT_DATA:
992 return RLIMIT_DATA;
993 case TARGET_RLIMIT_FSIZE:
994 return RLIMIT_FSIZE;
995 case TARGET_RLIMIT_LOCKS:
996 return RLIMIT_LOCKS;
997 case TARGET_RLIMIT_MEMLOCK:
998 return RLIMIT_MEMLOCK;
999 case TARGET_RLIMIT_MSGQUEUE:
1000 return RLIMIT_MSGQUEUE;
1001 case TARGET_RLIMIT_NICE:
1002 return RLIMIT_NICE;
1003 case TARGET_RLIMIT_NOFILE:
1004 return RLIMIT_NOFILE;
1005 case TARGET_RLIMIT_NPROC:
1006 return RLIMIT_NPROC;
1007 case TARGET_RLIMIT_RSS:
1008 return RLIMIT_RSS;
1009 case TARGET_RLIMIT_RTPRIO:
1010 return RLIMIT_RTPRIO;
1011 case TARGET_RLIMIT_SIGPENDING:
1012 return RLIMIT_SIGPENDING;
1013 case TARGET_RLIMIT_STACK:
1014 return RLIMIT_STACK;
1015 default:
1016 return code;
1017 }
1018 }
1019
1020 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1021 abi_ulong target_tv_addr)
1022 {
1023 struct target_timeval *target_tv;
1024
1025 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1026 return -TARGET_EFAULT;
1027
1028 __get_user(tv->tv_sec, &target_tv->tv_sec);
1029 __get_user(tv->tv_usec, &target_tv->tv_usec);
1030
1031 unlock_user_struct(target_tv, target_tv_addr, 0);
1032
1033 return 0;
1034 }
1035
1036 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1037 const struct timeval *tv)
1038 {
1039 struct target_timeval *target_tv;
1040
1041 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1042 return -TARGET_EFAULT;
1043
1044 __put_user(tv->tv_sec, &target_tv->tv_sec);
1045 __put_user(tv->tv_usec, &target_tv->tv_usec);
1046
1047 unlock_user_struct(target_tv, target_tv_addr, 1);
1048
1049 return 0;
1050 }
1051
1052 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1053 #include <mqueue.h>
1054
1055 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1056 abi_ulong target_mq_attr_addr)
1057 {
1058 struct target_mq_attr *target_mq_attr;
1059
1060 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1061 target_mq_attr_addr, 1))
1062 return -TARGET_EFAULT;
1063
1064 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1065 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1066 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1067 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1068
1069 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1070
1071 return 0;
1072 }
1073
1074 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1075 const struct mq_attr *attr)
1076 {
1077 struct target_mq_attr *target_mq_attr;
1078
1079 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1080 target_mq_attr_addr, 0))
1081 return -TARGET_EFAULT;
1082
1083 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1084 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1085 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1086 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1087
1088 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1089
1090 return 0;
1091 }
1092 #endif
1093
1094 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1095 /* do_select() must return target values and target errnos. */
1096 static abi_long do_select(int n,
1097 abi_ulong rfd_addr, abi_ulong wfd_addr,
1098 abi_ulong efd_addr, abi_ulong target_tv_addr)
1099 {
1100 fd_set rfds, wfds, efds;
1101 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1102 struct timeval tv, *tv_ptr;
1103 abi_long ret;
1104
1105 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1106 if (ret) {
1107 return ret;
1108 }
1109 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1110 if (ret) {
1111 return ret;
1112 }
1113 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1114 if (ret) {
1115 return ret;
1116 }
1117
1118 if (target_tv_addr) {
1119 if (copy_from_user_timeval(&tv, target_tv_addr))
1120 return -TARGET_EFAULT;
1121 tv_ptr = &tv;
1122 } else {
1123 tv_ptr = NULL;
1124 }
1125
1126 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1127
1128 if (!is_error(ret)) {
1129 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1130 return -TARGET_EFAULT;
1131 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1132 return -TARGET_EFAULT;
1133 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1134 return -TARGET_EFAULT;
1135
1136 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1137 return -TARGET_EFAULT;
1138 }
1139
1140 return ret;
1141 }
1142 #endif
1143
1144 static abi_long do_pipe2(int host_pipe[], int flags)
1145 {
1146 #ifdef CONFIG_PIPE2
1147 return pipe2(host_pipe, flags);
1148 #else
1149 return -ENOSYS;
1150 #endif
1151 }
1152
1153 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1154 int flags, int is_pipe2)
1155 {
1156 int host_pipe[2];
1157 abi_long ret;
1158 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1159
1160 if (is_error(ret))
1161 return get_errno(ret);
1162
1163 /* Several targets have special calling conventions for the original
1164 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1165 if (!is_pipe2) {
1166 #if defined(TARGET_ALPHA)
1167 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1168 return host_pipe[0];
1169 #elif defined(TARGET_MIPS)
1170 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1171 return host_pipe[0];
1172 #elif defined(TARGET_SH4)
1173 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1174 return host_pipe[0];
1175 #endif
1176 }
1177
1178 if (put_user_s32(host_pipe[0], pipedes)
1179 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1180 return -TARGET_EFAULT;
1181 return get_errno(ret);
1182 }
1183
1184 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1185 abi_ulong target_addr,
1186 socklen_t len)
1187 {
1188 struct target_ip_mreqn *target_smreqn;
1189
1190 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1191 if (!target_smreqn)
1192 return -TARGET_EFAULT;
1193 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1194 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1195 if (len == sizeof(struct target_ip_mreqn))
1196 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1197 unlock_user(target_smreqn, target_addr, 0);
1198
1199 return 0;
1200 }
1201
1202 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1203 abi_ulong target_addr,
1204 socklen_t len)
1205 {
1206 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1207 sa_family_t sa_family;
1208 struct target_sockaddr *target_saddr;
1209
1210 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1211 if (!target_saddr)
1212 return -TARGET_EFAULT;
1213
1214 sa_family = tswap16(target_saddr->sa_family);
1215
1216     /* Oops. The caller might send an incomplete sun_path; sun_path
1217 * must be terminated by \0 (see the manual page), but
1218 * unfortunately it is quite common to specify sockaddr_un
1219 * length as "strlen(x->sun_path)" while it should be
1220 * "strlen(...) + 1". We'll fix that here if needed.
1221      * The Linux kernel has a similar feature.
1222 */
1223
1224 if (sa_family == AF_UNIX) {
1225 if (len < unix_maxlen && len > 0) {
1226 char *cp = (char*)target_saddr;
1227
1228 if ( cp[len-1] && !cp[len] )
1229 len++;
1230 }
1231 if (len > unix_maxlen)
1232 len = unix_maxlen;
1233 }
1234
1235 memcpy(addr, target_saddr, len);
1236 addr->sa_family = sa_family;
1237 unlock_user(target_saddr, target_addr, 0);
1238
1239 return 0;
1240 }
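/*
 * Annotation (not part of the original source): the AF_UNIX fixup above
 * is for callers that pass addrlen as offsetof(struct sockaddr_un,
 * sun_path) + strlen(path), i.e. without counting the trailing '\0'.
 * If cp[len-1] is non-zero but cp[len] is the terminating NUL, len is
 * bumped by one so the host kernel sees a properly terminated path, and
 * lengths beyond sizeof(struct sockaddr_un) are clamped.
 */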
1241
1242 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1243 struct sockaddr *addr,
1244 socklen_t len)
1245 {
1246 struct target_sockaddr *target_saddr;
1247
1248 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1249 if (!target_saddr)
1250 return -TARGET_EFAULT;
1251 memcpy(target_saddr, addr, len);
1252 target_saddr->sa_family = tswap16(addr->sa_family);
1253 unlock_user(target_saddr, target_addr, len);
1254
1255 return 0;
1256 }
1257
1258 /* ??? Should this also swap msgh->name? */
1259 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1260 struct target_msghdr *target_msgh)
1261 {
1262 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1263 abi_long msg_controllen;
1264 abi_ulong target_cmsg_addr;
1265 struct target_cmsghdr *target_cmsg;
1266 socklen_t space = 0;
1267
1268 msg_controllen = tswapal(target_msgh->msg_controllen);
1269 if (msg_controllen < sizeof (struct target_cmsghdr))
1270 goto the_end;
1271 target_cmsg_addr = tswapal(target_msgh->msg_control);
1272 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1273 if (!target_cmsg)
1274 return -TARGET_EFAULT;
1275
1276 while (cmsg && target_cmsg) {
1277 void *data = CMSG_DATA(cmsg);
1278 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1279
1280 int len = tswapal(target_cmsg->cmsg_len)
1281 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1282
1283 space += CMSG_SPACE(len);
1284 if (space > msgh->msg_controllen) {
1285 space -= CMSG_SPACE(len);
1286 gemu_log("Host cmsg overflow\n");
1287 break;
1288 }
1289
1290 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1291 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1292 cmsg->cmsg_len = CMSG_LEN(len);
1293
1294 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1295 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1296 memcpy(data, target_data, len);
1297 } else {
1298 int *fd = (int *)data;
1299 int *target_fd = (int *)target_data;
1300 int i, numfds = len / sizeof(int);
1301
1302 for (i = 0; i < numfds; i++)
1303 fd[i] = tswap32(target_fd[i]);
1304 }
1305
1306 cmsg = CMSG_NXTHDR(msgh, cmsg);
1307 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1308 }
1309 unlock_user(target_cmsg, target_cmsg_addr, 0);
1310 the_end:
1311 msgh->msg_controllen = space;
1312 return 0;
1313 }
1314
1315 /* ??? Should this also swap msgh->name? */
1316 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1317 struct msghdr *msgh)
1318 {
1319 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1320 abi_long msg_controllen;
1321 abi_ulong target_cmsg_addr;
1322 struct target_cmsghdr *target_cmsg;
1323 socklen_t space = 0;
1324
1325 msg_controllen = tswapal(target_msgh->msg_controllen);
1326 if (msg_controllen < sizeof (struct target_cmsghdr))
1327 goto the_end;
1328 target_cmsg_addr = tswapal(target_msgh->msg_control);
1329 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1330 if (!target_cmsg)
1331 return -TARGET_EFAULT;
1332
1333 while (cmsg && target_cmsg) {
1334 void *data = CMSG_DATA(cmsg);
1335 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1336
1337 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1338
1339 space += TARGET_CMSG_SPACE(len);
1340 if (space > msg_controllen) {
1341 space -= TARGET_CMSG_SPACE(len);
1342 gemu_log("Target cmsg overflow\n");
1343 break;
1344 }
1345
1346 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1347 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1348 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1349
1350 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1351 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1352 memcpy(target_data, data, len);
1353 } else {
1354 int *fd = (int *)data;
1355 int *target_fd = (int *)target_data;
1356 int i, numfds = len / sizeof(int);
1357
1358 for (i = 0; i < numfds; i++)
1359 target_fd[i] = tswap32(fd[i]);
1360 }
1361
1362 cmsg = CMSG_NXTHDR(msgh, cmsg);
1363 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1364 }
1365 unlock_user(target_cmsg, target_cmsg_addr, space);
1366 the_end:
1367 target_msgh->msg_controllen = tswapal(space);
1368 return 0;
1369 }
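/*
 * Annotation (not part of the original source): the only ancillary data
 * these two converters fully understand is SCM_RIGHTS, i.e. file
 * descriptors passed over an AF_UNIX socket.  That payload is an array
 * of ints, so each descriptor is byte-swapped individually while the
 * surrounding cmsghdr is re-laid out for the differing alignment rules
 * (CMSG_* on the host side, TARGET_CMSG_* on the target side).  Any
 * other cmsg type is copied through verbatim with a warning, which is
 * only correct when the payload happens to share its layout between
 * host and target.
 */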
1370
1371 /* do_setsockopt() Must return target values and target errnos. */
1372 static abi_long do_setsockopt(int sockfd, int level, int optname,
1373 abi_ulong optval_addr, socklen_t optlen)
1374 {
1375 abi_long ret;
1376 int val;
1377 struct ip_mreqn *ip_mreq;
1378 struct ip_mreq_source *ip_mreq_source;
1379
1380 switch(level) {
1381 case SOL_TCP:
1382 /* TCP options all take an 'int' value. */
1383 if (optlen < sizeof(uint32_t))
1384 return -TARGET_EINVAL;
1385
1386 if (get_user_u32(val, optval_addr))
1387 return -TARGET_EFAULT;
1388 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1389 break;
1390 case SOL_IP:
1391 switch(optname) {
1392 case IP_TOS:
1393 case IP_TTL:
1394 case IP_HDRINCL:
1395 case IP_ROUTER_ALERT:
1396 case IP_RECVOPTS:
1397 case IP_RETOPTS:
1398 case IP_PKTINFO:
1399 case IP_MTU_DISCOVER:
1400 case IP_RECVERR:
1401 case IP_RECVTOS:
1402 #ifdef IP_FREEBIND
1403 case IP_FREEBIND:
1404 #endif
1405 case IP_MULTICAST_TTL:
1406 case IP_MULTICAST_LOOP:
1407 val = 0;
1408 if (optlen >= sizeof(uint32_t)) {
1409 if (get_user_u32(val, optval_addr))
1410 return -TARGET_EFAULT;
1411 } else if (optlen >= 1) {
1412 if (get_user_u8(val, optval_addr))
1413 return -TARGET_EFAULT;
1414 }
1415 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1416 break;
1417 case IP_ADD_MEMBERSHIP:
1418 case IP_DROP_MEMBERSHIP:
1419 if (optlen < sizeof (struct target_ip_mreq) ||
1420 optlen > sizeof (struct target_ip_mreqn))
1421 return -TARGET_EINVAL;
1422
1423 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1424 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1425 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1426 break;
1427
1428 case IP_BLOCK_SOURCE:
1429 case IP_UNBLOCK_SOURCE:
1430 case IP_ADD_SOURCE_MEMBERSHIP:
1431 case IP_DROP_SOURCE_MEMBERSHIP:
1432 if (optlen != sizeof (struct target_ip_mreq_source))
1433 return -TARGET_EINVAL;
1434
1435 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1436 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1437 unlock_user (ip_mreq_source, optval_addr, 0);
1438 break;
1439
1440 default:
1441 goto unimplemented;
1442 }
1443 break;
1444 case TARGET_SOL_SOCKET:
1445 switch (optname) {
1446 /* Options with 'int' argument. */
1447 case TARGET_SO_DEBUG:
1448 optname = SO_DEBUG;
1449 break;
1450 case TARGET_SO_REUSEADDR:
1451 optname = SO_REUSEADDR;
1452 break;
1453 case TARGET_SO_TYPE:
1454 optname = SO_TYPE;
1455 break;
1456 case TARGET_SO_ERROR:
1457 optname = SO_ERROR;
1458 break;
1459 case TARGET_SO_DONTROUTE:
1460 optname = SO_DONTROUTE;
1461 break;
1462 case TARGET_SO_BROADCAST:
1463 optname = SO_BROADCAST;
1464 break;
1465 case TARGET_SO_SNDBUF:
1466 optname = SO_SNDBUF;
1467 break;
1468 case TARGET_SO_RCVBUF:
1469 optname = SO_RCVBUF;
1470 break;
1471 case TARGET_SO_KEEPALIVE:
1472 optname = SO_KEEPALIVE;
1473 break;
1474 case TARGET_SO_OOBINLINE:
1475 optname = SO_OOBINLINE;
1476 break;
1477 case TARGET_SO_NO_CHECK:
1478 optname = SO_NO_CHECK;
1479 break;
1480 case TARGET_SO_PRIORITY:
1481 optname = SO_PRIORITY;
1482 break;
1483 #ifdef SO_BSDCOMPAT
1484 case TARGET_SO_BSDCOMPAT:
1485 optname = SO_BSDCOMPAT;
1486 break;
1487 #endif
1488 case TARGET_SO_PASSCRED:
1489 optname = SO_PASSCRED;
1490 break;
1491 case TARGET_SO_TIMESTAMP:
1492 optname = SO_TIMESTAMP;
1493 break;
1494 case TARGET_SO_RCVLOWAT:
1495 optname = SO_RCVLOWAT;
1496 break;
1497 case TARGET_SO_RCVTIMEO:
1498 optname = SO_RCVTIMEO;
1499 break;
1500 case TARGET_SO_SNDTIMEO:
1501 optname = SO_SNDTIMEO;
1502 break;
1503 break;
1504 default:
1505 goto unimplemented;
1506 }
1507 if (optlen < sizeof(uint32_t))
1508 return -TARGET_EINVAL;
1509
1510 if (get_user_u32(val, optval_addr))
1511 return -TARGET_EFAULT;
1512 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1513 break;
1514 default:
1515 unimplemented:
1516 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1517 ret = -TARGET_ENOPROTOOPT;
1518 }
1519 return ret;
1520 }
1521
1522 /* do_getsockopt() Must return target values and target errnos. */
1523 static abi_long do_getsockopt(int sockfd, int level, int optname,
1524 abi_ulong optval_addr, abi_ulong optlen)
1525 {
1526 abi_long ret;
1527 int len, val;
1528 socklen_t lv;
1529
1530 switch(level) {
1531 case TARGET_SOL_SOCKET:
1532 level = SOL_SOCKET;
1533 switch (optname) {
1534 /* These don't just return a single integer */
1535 case TARGET_SO_LINGER:
1536 case TARGET_SO_RCVTIMEO:
1537 case TARGET_SO_SNDTIMEO:
1538 case TARGET_SO_PEERNAME:
1539 goto unimplemented;
1540 case TARGET_SO_PEERCRED: {
1541 struct ucred cr;
1542 socklen_t crlen;
1543 struct target_ucred *tcr;
1544
1545 if (get_user_u32(len, optlen)) {
1546 return -TARGET_EFAULT;
1547 }
1548 if (len < 0) {
1549 return -TARGET_EINVAL;
1550 }
1551
1552 crlen = sizeof(cr);
1553 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1554 &cr, &crlen));
1555 if (ret < 0) {
1556 return ret;
1557 }
1558 if (len > crlen) {
1559 len = crlen;
1560 }
1561 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1562 return -TARGET_EFAULT;
1563 }
1564 __put_user(cr.pid, &tcr->pid);
1565 __put_user(cr.uid, &tcr->uid);
1566 __put_user(cr.gid, &tcr->gid);
1567 unlock_user_struct(tcr, optval_addr, 1);
1568 if (put_user_u32(len, optlen)) {
1569 return -TARGET_EFAULT;
1570 }
1571 break;
1572 }
1573 /* Options with 'int' argument. */
1574 case TARGET_SO_DEBUG:
1575 optname = SO_DEBUG;
1576 goto int_case;
1577 case TARGET_SO_REUSEADDR:
1578 optname = SO_REUSEADDR;
1579 goto int_case;
1580 case TARGET_SO_TYPE:
1581 optname = SO_TYPE;
1582 goto int_case;
1583 case TARGET_SO_ERROR:
1584 optname = SO_ERROR;
1585 goto int_case;
1586 case TARGET_SO_DONTROUTE:
1587 optname = SO_DONTROUTE;
1588 goto int_case;
1589 case TARGET_SO_BROADCAST:
1590 optname = SO_BROADCAST;
1591 goto int_case;
1592 case TARGET_SO_SNDBUF:
1593 optname = SO_SNDBUF;
1594 goto int_case;
1595 case TARGET_SO_RCVBUF:
1596 optname = SO_RCVBUF;
1597 goto int_case;
1598 case TARGET_SO_KEEPALIVE:
1599 optname = SO_KEEPALIVE;
1600 goto int_case;
1601 case TARGET_SO_OOBINLINE:
1602 optname = SO_OOBINLINE;
1603 goto int_case;
1604 case TARGET_SO_NO_CHECK:
1605 optname = SO_NO_CHECK;
1606 goto int_case;
1607 case TARGET_SO_PRIORITY:
1608 optname = SO_PRIORITY;
1609 goto int_case;
1610 #ifdef SO_BSDCOMPAT
1611 case TARGET_SO_BSDCOMPAT:
1612 optname = SO_BSDCOMPAT;
1613 goto int_case;
1614 #endif
1615 case TARGET_SO_PASSCRED:
1616 optname = SO_PASSCRED;
1617 goto int_case;
1618 case TARGET_SO_TIMESTAMP:
1619 optname = SO_TIMESTAMP;
1620 goto int_case;
1621 case TARGET_SO_RCVLOWAT:
1622 optname = SO_RCVLOWAT;
1623 goto int_case;
1624 default:
1625 goto int_case;
1626 }
1627 break;
1628 case SOL_TCP:
1629 /* TCP options all take an 'int' value. */
1630 int_case:
1631 if (get_user_u32(len, optlen))
1632 return -TARGET_EFAULT;
1633 if (len < 0)
1634 return -TARGET_EINVAL;
1635 lv = sizeof(lv);
1636 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1637 if (ret < 0)
1638 return ret;
1639 if (len > lv)
1640 len = lv;
1641 if (len == 4) {
1642 if (put_user_u32(val, optval_addr))
1643 return -TARGET_EFAULT;
1644 } else {
1645 if (put_user_u8(val, optval_addr))
1646 return -TARGET_EFAULT;
1647 }
1648 if (put_user_u32(len, optlen))
1649 return -TARGET_EFAULT;
1650 break;
1651 case SOL_IP:
1652 switch(optname) {
1653 case IP_TOS:
1654 case IP_TTL:
1655 case IP_HDRINCL:
1656 case IP_ROUTER_ALERT:
1657 case IP_RECVOPTS:
1658 case IP_RETOPTS:
1659 case IP_PKTINFO:
1660 case IP_MTU_DISCOVER:
1661 case IP_RECVERR:
1662 case IP_RECVTOS:
1663 #ifdef IP_FREEBIND
1664 case IP_FREEBIND:
1665 #endif
1666 case IP_MULTICAST_TTL:
1667 case IP_MULTICAST_LOOP:
1668 if (get_user_u32(len, optlen))
1669 return -TARGET_EFAULT;
1670 if (len < 0)
1671 return -TARGET_EINVAL;
1672 lv = sizeof(lv);
1673 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1674 if (ret < 0)
1675 return ret;
1676 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1677 len = 1;
1678 if (put_user_u32(len, optlen)
1679 || put_user_u8(val, optval_addr))
1680 return -TARGET_EFAULT;
1681 } else {
1682 if (len > sizeof(int))
1683 len = sizeof(int);
1684 if (put_user_u32(len, optlen)
1685 || put_user_u32(val, optval_addr))
1686 return -TARGET_EFAULT;
1687 }
1688 break;
1689 default:
1690 ret = -TARGET_ENOPROTOOPT;
1691 break;
1692 }
1693 break;
1694 default:
1695 unimplemented:
1696 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1697 level, optname);
1698 ret = -TARGET_EOPNOTSUPP;
1699 break;
1700 }
1701 return ret;
1702 }
1703
1704 /* FIXME
1705 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1706 * other lock functions have a return code of 0 for failure.
1707 */
1708 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1709 int count, int copy)
1710 {
1711 struct target_iovec *target_vec;
1712 abi_ulong base;
1713 int i;
1714
1715 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1716 if (!target_vec)
1717 return -TARGET_EFAULT;
1718 for(i = 0;i < count; i++) {
1719 base = tswapal(target_vec[i].iov_base);
1720 vec[i].iov_len = tswapal(target_vec[i].iov_len);
1721 if (vec[i].iov_len != 0) {
1722 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1723 /* Don't check lock_user return value. We must call writev even
1724                if an element has an invalid base address. */
1725 } else {
1726 /* zero length pointer is ignored */
1727 vec[i].iov_base = NULL;
1728 }
1729 }
1730 unlock_user (target_vec, target_addr, 0);
1731 return 0;
1732 }
1733
1734 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1735 int count, int copy)
1736 {
1737 struct target_iovec *target_vec;
1738 abi_ulong base;
1739 int i;
1740
1741 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1742 if (!target_vec)
1743 return -TARGET_EFAULT;
1744 for(i = 0;i < count; i++) {
1745 if (target_vec[i].iov_base) {
1746 base = tswapal(target_vec[i].iov_base);
1747 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1748 }
1749 }
1750 unlock_user (target_vec, target_addr, 0);
1751
1752 return 0;
1753 }
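/*
 * Annotation (not part of the original source): lock_iovec() converts a
 * guest iovec array into host struct iovec entries, byte-swapping
 * iov_base/iov_len and mapping each non-empty buffer into host memory
 * with lock_user(); unlock_iovec() releases those mappings, copying the
 * data back to the guest only when 'copy' is set (i.e. after a
 * read-style operation).  A typical caller looks roughly like
 *
 *     struct iovec *vec = alloca(count * sizeof(struct iovec));
 *     lock_iovec(VERIFY_READ, vec, target_vec, count, 1);
 *     ret = get_errno(writev(fd, vec, count));
 *     unlock_iovec(vec, target_vec, count, 0);
 *
 * which is how the readv/writev handlers elsewhere in this file use the
 * pair; do_sendrecvmsg() below drives the same helpers for
 * sendmsg/recvmsg.
 */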
1754
1755 /* do_socket() Must return target values and target errnos. */
1756 static abi_long do_socket(int domain, int type, int protocol)
1757 {
1758 #if defined(TARGET_MIPS)
1759 switch(type) {
1760 case TARGET_SOCK_DGRAM:
1761 type = SOCK_DGRAM;
1762 break;
1763 case TARGET_SOCK_STREAM:
1764 type = SOCK_STREAM;
1765 break;
1766 case TARGET_SOCK_RAW:
1767 type = SOCK_RAW;
1768 break;
1769 case TARGET_SOCK_RDM:
1770 type = SOCK_RDM;
1771 break;
1772 case TARGET_SOCK_SEQPACKET:
1773 type = SOCK_SEQPACKET;
1774 break;
1775 case TARGET_SOCK_PACKET:
1776 type = SOCK_PACKET;
1777 break;
1778 }
1779 #endif
1780 if (domain == PF_NETLINK)
1781         return -TARGET_EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1782 return get_errno(socket(domain, type, protocol));
1783 }
1784
1785 /* do_bind() Must return target values and target errnos. */
1786 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1787 socklen_t addrlen)
1788 {
1789 void *addr;
1790 abi_long ret;
1791
1792 if ((int)addrlen < 0) {
1793 return -TARGET_EINVAL;
1794 }
1795
1796 addr = alloca(addrlen+1);
1797
1798 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1799 if (ret)
1800 return ret;
1801
1802 return get_errno(bind(sockfd, addr, addrlen));
1803 }
1804
1805 /* do_connect() Must return target values and target errnos. */
1806 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1807 socklen_t addrlen)
1808 {
1809 void *addr;
1810 abi_long ret;
1811
1812 if ((int)addrlen < 0) {
1813 return -TARGET_EINVAL;
1814 }
1815
1816 addr = alloca(addrlen);
1817
1818 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1819 if (ret)
1820 return ret;
1821
1822 return get_errno(connect(sockfd, addr, addrlen));
1823 }
1824
1825 /* do_sendrecvmsg() Must return target values and target errnos. */
1826 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1827 int flags, int send)
1828 {
1829 abi_long ret, len;
1830 struct target_msghdr *msgp;
1831 struct msghdr msg;
1832 int count;
1833 struct iovec *vec;
1834 abi_ulong target_vec;
1835
1836 /* FIXME */
1837 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1838 msgp,
1839 target_msg,
1840 send ? 1 : 0))
1841 return -TARGET_EFAULT;
1842 if (msgp->msg_name) {
1843 msg.msg_namelen = tswap32(msgp->msg_namelen);
1844 msg.msg_name = alloca(msg.msg_namelen);
1845 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1846 msg.msg_namelen);
1847 if (ret) {
1848 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1849 return ret;
1850 }
1851 } else {
1852 msg.msg_name = NULL;
1853 msg.msg_namelen = 0;
1854 }
1855 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1856 msg.msg_control = alloca(msg.msg_controllen);
1857 msg.msg_flags = tswap32(msgp->msg_flags);
1858
1859 count = tswapal(msgp->msg_iovlen);
1860 vec = alloca(count * sizeof(struct iovec));
1861 target_vec = tswapal(msgp->msg_iov);
1862 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1863 msg.msg_iovlen = count;
1864 msg.msg_iov = vec;
1865
1866 if (send) {
1867 ret = target_to_host_cmsg(&msg, msgp);
1868 if (ret == 0)
1869 ret = get_errno(sendmsg(fd, &msg, flags));
1870 } else {
1871 ret = get_errno(recvmsg(fd, &msg, flags));
1872 if (!is_error(ret)) {
1873 len = ret;
1874 ret = host_to_target_cmsg(msgp, &msg);
1875 if (!is_error(ret))
1876 ret = len;
1877 }
1878 }
1879 unlock_iovec(vec, target_vec, count, !send);
1880 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1881 return ret;
1882 }
1883
1884 /* do_accept() Must return target values and target errnos. */
1885 static abi_long do_accept(int fd, abi_ulong target_addr,
1886 abi_ulong target_addrlen_addr)
1887 {
1888 socklen_t addrlen;
1889 void *addr;
1890 abi_long ret;
1891
1892 if (target_addr == 0)
1893 return get_errno(accept(fd, NULL, NULL));
1894
1895 /* linux returns EINVAL if addrlen pointer is invalid */
1896 if (get_user_u32(addrlen, target_addrlen_addr))
1897 return -TARGET_EINVAL;
1898
1899 if ((int)addrlen < 0) {
1900 return -TARGET_EINVAL;
1901 }
1902
1903 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1904 return -TARGET_EINVAL;
1905
1906 addr = alloca(addrlen);
1907
1908 ret = get_errno(accept(fd, addr, &addrlen));
1909 if (!is_error(ret)) {
1910 host_to_target_sockaddr(target_addr, addr, addrlen);
1911 if (put_user_u32(addrlen, target_addrlen_addr))
1912 ret = -TARGET_EFAULT;
1913 }
1914 return ret;
1915 }
1916
1917 /* do_getpeername() Must return target values and target errnos. */
1918 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1919 abi_ulong target_addrlen_addr)
1920 {
1921 socklen_t addrlen;
1922 void *addr;
1923 abi_long ret;
1924
1925 if (get_user_u32(addrlen, target_addrlen_addr))
1926 return -TARGET_EFAULT;
1927
1928 if ((int)addrlen < 0) {
1929 return -TARGET_EINVAL;
1930 }
1931
1932 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1933 return -TARGET_EFAULT;
1934
1935 addr = alloca(addrlen);
1936
1937 ret = get_errno(getpeername(fd, addr, &addrlen));
1938 if (!is_error(ret)) {
1939 host_to_target_sockaddr(target_addr, addr, addrlen);
1940 if (put_user_u32(addrlen, target_addrlen_addr))
1941 ret = -TARGET_EFAULT;
1942 }
1943 return ret;
1944 }
1945
1946 /* do_getsockname() Must return target values and target errnos. */
1947 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1948 abi_ulong target_addrlen_addr)
1949 {
1950 socklen_t addrlen;
1951 void *addr;
1952 abi_long ret;
1953
1954 if (get_user_u32(addrlen, target_addrlen_addr))
1955 return -TARGET_EFAULT;
1956
1957 if ((int)addrlen < 0) {
1958 return -TARGET_EINVAL;
1959 }
1960
1961 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1962 return -TARGET_EFAULT;
1963
1964 addr = alloca(addrlen);
1965
1966 ret = get_errno(getsockname(fd, addr, &addrlen));
1967 if (!is_error(ret)) {
1968 host_to_target_sockaddr(target_addr, addr, addrlen);
1969 if (put_user_u32(addrlen, target_addrlen_addr))
1970 ret = -TARGET_EFAULT;
1971 }
1972 return ret;
1973 }
1974
1975 /* do_socketpair() Must return target values and target errnos. */
1976 static abi_long do_socketpair(int domain, int type, int protocol,
1977 abi_ulong target_tab_addr)
1978 {
1979 int tab[2];
1980 abi_long ret;
1981
1982 ret = get_errno(socketpair(domain, type, protocol, tab));
1983 if (!is_error(ret)) {
1984 if (put_user_s32(tab[0], target_tab_addr)
1985 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1986 ret = -TARGET_EFAULT;
1987 }
1988 return ret;
1989 }
1990
1991 /* do_sendto() Must return target values and target errnos. */
1992 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1993 abi_ulong target_addr, socklen_t addrlen)
1994 {
1995 void *addr;
1996 void *host_msg;
1997 abi_long ret;
1998
1999 if ((int)addrlen < 0) {
2000 return -TARGET_EINVAL;
2001 }
2002
2003 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2004 if (!host_msg)
2005 return -TARGET_EFAULT;
2006 if (target_addr) {
2007 addr = alloca(addrlen);
2008 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2009 if (ret) {
2010 unlock_user(host_msg, msg, 0);
2011 return ret;
2012 }
2013 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2014 } else {
2015 ret = get_errno(send(fd, host_msg, len, flags));
2016 }
2017 unlock_user(host_msg, msg, 0);
2018 return ret;
2019 }
2020
2021 /* do_recvfrom() Must return target values and target errnos. */
2022 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2023 abi_ulong target_addr,
2024 abi_ulong target_addrlen)
2025 {
2026 socklen_t addrlen;
2027 void *addr;
2028 void *host_msg;
2029 abi_long ret;
2030
2031 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2032 if (!host_msg)
2033 return -TARGET_EFAULT;
2034 if (target_addr) {
2035 if (get_user_u32(addrlen, target_addrlen)) {
2036 ret = -TARGET_EFAULT;
2037 goto fail;
2038 }
2039 if ((int)addrlen < 0) {
2040 ret = -TARGET_EINVAL;
2041 goto fail;
2042 }
2043 addr = alloca(addrlen);
2044 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2045 } else {
2046 addr = NULL; /* To keep compiler quiet. */
2047 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2048 }
2049 if (!is_error(ret)) {
2050 if (target_addr) {
2051 host_to_target_sockaddr(target_addr, addr, addrlen);
2052 if (put_user_u32(addrlen, target_addrlen)) {
2053 ret = -TARGET_EFAULT;
2054 goto fail;
2055 }
2056 }
2057 unlock_user(host_msg, msg, len);
2058 } else {
2059 fail:
2060 unlock_user(host_msg, msg, 0);
2061 }
2062 return ret;
2063 }
2064
2065 #ifdef TARGET_NR_socketcall
2066 /* do_socketcall() Must return target values and target errnos. */
2067 static abi_long do_socketcall(int num, abi_ulong vptr)
2068 {
2069 abi_long ret;
2070 const int n = sizeof(abi_ulong);
2071
2072 switch(num) {
2073 case SOCKOP_socket:
2074 {
2075 abi_ulong domain, type, protocol;
2076
2077 if (get_user_ual(domain, vptr)
2078 || get_user_ual(type, vptr + n)
2079 || get_user_ual(protocol, vptr + 2 * n))
2080 return -TARGET_EFAULT;
2081
2082 ret = do_socket(domain, type, protocol);
2083 }
2084 break;
2085 case SOCKOP_bind:
2086 {
2087 abi_ulong sockfd;
2088 abi_ulong target_addr;
2089 socklen_t addrlen;
2090
2091 if (get_user_ual(sockfd, vptr)
2092 || get_user_ual(target_addr, vptr + n)
2093 || get_user_ual(addrlen, vptr + 2 * n))
2094 return -TARGET_EFAULT;
2095
2096 ret = do_bind(sockfd, target_addr, addrlen);
2097 }
2098 break;
2099 case SOCKOP_connect:
2100 {
2101 abi_ulong sockfd;
2102 abi_ulong target_addr;
2103 socklen_t addrlen;
2104
2105 if (get_user_ual(sockfd, vptr)
2106 || get_user_ual(target_addr, vptr + n)
2107 || get_user_ual(addrlen, vptr + 2 * n))
2108 return -TARGET_EFAULT;
2109
2110 ret = do_connect(sockfd, target_addr, addrlen);
2111 }
2112 break;
2113 case SOCKOP_listen:
2114 {
2115 abi_ulong sockfd, backlog;
2116
2117 if (get_user_ual(sockfd, vptr)
2118 || get_user_ual(backlog, vptr + n))
2119 return -TARGET_EFAULT;
2120
2121 ret = get_errno(listen(sockfd, backlog));
2122 }
2123 break;
2124 case SOCKOP_accept:
2125 {
2126 abi_ulong sockfd;
2127 abi_ulong target_addr, target_addrlen;
2128
2129 if (get_user_ual(sockfd, vptr)
2130 || get_user_ual(target_addr, vptr + n)
2131 || get_user_ual(target_addrlen, vptr + 2 * n))
2132 return -TARGET_EFAULT;
2133
2134 ret = do_accept(sockfd, target_addr, target_addrlen);
2135 }
2136 break;
2137 case SOCKOP_getsockname:
2138 {
2139 abi_ulong sockfd;
2140 abi_ulong target_addr, target_addrlen;
2141
2142 if (get_user_ual(sockfd, vptr)
2143 || get_user_ual(target_addr, vptr + n)
2144 || get_user_ual(target_addrlen, vptr + 2 * n))
2145 return -TARGET_EFAULT;
2146
2147 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2148 }
2149 break;
2150 case SOCKOP_getpeername:
2151 {
2152 abi_ulong sockfd;
2153 abi_ulong target_addr, target_addrlen;
2154
2155 if (get_user_ual(sockfd, vptr)
2156 || get_user_ual(target_addr, vptr + n)
2157 || get_user_ual(target_addrlen, vptr + 2 * n))
2158 return -TARGET_EFAULT;
2159
2160 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2161 }
2162 break;
2163 case SOCKOP_socketpair:
2164 {
2165 abi_ulong domain, type, protocol;
2166 abi_ulong tab;
2167
2168 if (get_user_ual(domain, vptr)
2169 || get_user_ual(type, vptr + n)
2170 || get_user_ual(protocol, vptr + 2 * n)
2171 || get_user_ual(tab, vptr + 3 * n))
2172 return -TARGET_EFAULT;
2173
2174 ret = do_socketpair(domain, type, protocol, tab);
2175 }
2176 break;
2177 case SOCKOP_send:
2178 {
2179 abi_ulong sockfd;
2180 abi_ulong msg;
2181 size_t len;
2182 abi_ulong flags;
2183
2184 if (get_user_ual(sockfd, vptr)
2185 || get_user_ual(msg, vptr + n)
2186 || get_user_ual(len, vptr + 2 * n)
2187 || get_user_ual(flags, vptr + 3 * n))
2188 return -TARGET_EFAULT;
2189
2190 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2191 }
2192 break;
2193 case SOCKOP_recv:
2194 {
2195 abi_ulong sockfd;
2196 abi_ulong msg;
2197 size_t len;
2198 abi_ulong flags;
2199
2200 if (get_user_ual(sockfd, vptr)
2201 || get_user_ual(msg, vptr + n)
2202 || get_user_ual(len, vptr + 2 * n)
2203 || get_user_ual(flags, vptr + 3 * n))
2204 return -TARGET_EFAULT;
2205
2206 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2207 }
2208 break;
2209 case SOCKOP_sendto:
2210 {
2211 abi_ulong sockfd;
2212 abi_ulong msg;
2213 size_t len;
2214 abi_ulong flags;
2215 abi_ulong addr;
2216 socklen_t addrlen;
2217
2218 if (get_user_ual(sockfd, vptr)
2219 || get_user_ual(msg, vptr + n)
2220 || get_user_ual(len, vptr + 2 * n)
2221 || get_user_ual(flags, vptr + 3 * n)
2222 || get_user_ual(addr, vptr + 4 * n)
2223 || get_user_ual(addrlen, vptr + 5 * n))
2224 return -TARGET_EFAULT;
2225
2226 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2227 }
2228 break;
2229 case SOCKOP_recvfrom:
2230 {
2231 abi_ulong sockfd;
2232 abi_ulong msg;
2233 size_t len;
2234 abi_ulong flags;
2235 abi_ulong addr;
2236 socklen_t addrlen;
2237
2238 if (get_user_ual(sockfd, vptr)
2239 || get_user_ual(msg, vptr + n)
2240 || get_user_ual(len, vptr + 2 * n)
2241 || get_user_ual(flags, vptr + 3 * n)
2242 || get_user_ual(addr, vptr + 4 * n)
2243 || get_user_ual(addrlen, vptr + 5 * n))
2244 return -TARGET_EFAULT;
2245
2246 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2247 }
2248 break;
2249 case SOCKOP_shutdown:
2250 {
2251 abi_ulong sockfd, how;
2252
2253 if (get_user_ual(sockfd, vptr)
2254 || get_user_ual(how, vptr + n))
2255 return -TARGET_EFAULT;
2256
2257 ret = get_errno(shutdown(sockfd, how));
2258 }
2259 break;
2260 case SOCKOP_sendmsg:
2261 case SOCKOP_recvmsg:
2262 {
2263 abi_ulong fd;
2264 abi_ulong target_msg;
2265 abi_ulong flags;
2266
2267 if (get_user_ual(fd, vptr)
2268 || get_user_ual(target_msg, vptr + n)
2269 || get_user_ual(flags, vptr + 2 * n))
2270 return -TARGET_EFAULT;
2271
2272 ret = do_sendrecvmsg(fd, target_msg, flags,
2273 (num == SOCKOP_sendmsg));
2274 }
2275 break;
2276 case SOCKOP_setsockopt:
2277 {
2278 abi_ulong sockfd;
2279 abi_ulong level;
2280 abi_ulong optname;
2281 abi_ulong optval;
2282 socklen_t optlen;
2283
2284 if (get_user_ual(sockfd, vptr)
2285 || get_user_ual(level, vptr + n)
2286 || get_user_ual(optname, vptr + 2 * n)
2287 || get_user_ual(optval, vptr + 3 * n)
2288 || get_user_ual(optlen, vptr + 4 * n))
2289 return -TARGET_EFAULT;
2290
2291 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2292 }
2293 break;
2294 case SOCKOP_getsockopt:
2295 {
2296 abi_ulong sockfd;
2297 abi_ulong level;
2298 abi_ulong optname;
2299 abi_ulong optval;
2300 socklen_t optlen;
2301
2302 if (get_user_ual(sockfd, vptr)
2303 || get_user_ual(level, vptr + n)
2304 || get_user_ual(optname, vptr + 2 * n)
2305 || get_user_ual(optval, vptr + 3 * n)
2306 || get_user_ual(optlen, vptr + 4 * n))
2307 return -TARGET_EFAULT;
2308
2309 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2310 }
2311 break;
2312 default:
2313 gemu_log("Unsupported socketcall: %d\n", num);
2314 ret = -TARGET_ENOSYS;
2315 break;
2316 }
2317 return ret;
2318 }
2319 #endif
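/* Hypothetical guest-side view (illustration only, not part of QEMU):
 * on socketcall-based targets the C library packs the arguments into an
 * array of longs and traps with the SOCKOP_* number, which is exactly
 * the layout do_socketcall() above unpacks with get_user_ual() at
 * multiples of sizeof(abi_ulong).
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>

static int guest_socket(int domain, int type, int protocol)
{
    long args[3] = { domain, type, protocol };  /* matches the vptr layout */

    /* 1 == SYS_SOCKET in the kernel's socketcall multiplexer */
    return syscall(__NR_socketcall, 1, args);
}
#endif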
2320
2321 #define N_SHM_REGIONS 32
2322
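/* Book-keeping for guest shmat() mappings: do_shmat() records the guest
 * address and size of every attached segment here so that do_shmdt() can
 * clear the page flags again; a start of 0 marks a free slot.
 */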
2323 static struct shm_region {
2324 abi_ulong start;
2325 abi_ulong size;
2326 } shm_regions[N_SHM_REGIONS];
2327
2328 struct target_ipc_perm
2329 {
2330 abi_long __key;
2331 abi_ulong uid;
2332 abi_ulong gid;
2333 abi_ulong cuid;
2334 abi_ulong cgid;
2335 unsigned short int mode;
2336 unsigned short int __pad1;
2337 unsigned short int __seq;
2338 unsigned short int __pad2;
2339 abi_ulong __unused1;
2340 abi_ulong __unused2;
2341 };
2342
2343 struct target_semid_ds
2344 {
2345 struct target_ipc_perm sem_perm;
2346 abi_ulong sem_otime;
2347 abi_ulong __unused1;
2348 abi_ulong sem_ctime;
2349 abi_ulong __unused2;
2350 abi_ulong sem_nsems;
2351 abi_ulong __unused3;
2352 abi_ulong __unused4;
2353 };
2354
2355 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2356 abi_ulong target_addr)
2357 {
2358 struct target_ipc_perm *target_ip;
2359 struct target_semid_ds *target_sd;
2360
2361 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2362 return -TARGET_EFAULT;
2363 target_ip = &(target_sd->sem_perm);
2364 host_ip->__key = tswapal(target_ip->__key);
2365 host_ip->uid = tswapal(target_ip->uid);
2366 host_ip->gid = tswapal(target_ip->gid);
2367 host_ip->cuid = tswapal(target_ip->cuid);
2368 host_ip->cgid = tswapal(target_ip->cgid);
2369 host_ip->mode = tswap16(target_ip->mode);
2370 unlock_user_struct(target_sd, target_addr, 0);
2371 return 0;
2372 }
2373
2374 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2375 struct ipc_perm *host_ip)
2376 {
2377 struct target_ipc_perm *target_ip;
2378 struct target_semid_ds *target_sd;
2379
2380 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2381 return -TARGET_EFAULT;
2382 target_ip = &(target_sd->sem_perm);
2383 target_ip->__key = tswapal(host_ip->__key);
2384 target_ip->uid = tswapal(host_ip->uid);
2385 target_ip->gid = tswapal(host_ip->gid);
2386 target_ip->cuid = tswapal(host_ip->cuid);
2387 target_ip->cgid = tswapal(host_ip->cgid);
2388 target_ip->mode = tswap16(host_ip->mode);
2389 unlock_user_struct(target_sd, target_addr, 1);
2390 return 0;
2391 }
2392
2393 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2394 abi_ulong target_addr)
2395 {
2396 struct target_semid_ds *target_sd;
2397
2398 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2399 return -TARGET_EFAULT;
2400 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2401 return -TARGET_EFAULT;
2402 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2403 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2404 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2405 unlock_user_struct(target_sd, target_addr, 0);
2406 return 0;
2407 }
2408
2409 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2410 struct semid_ds *host_sd)
2411 {
2412 struct target_semid_ds *target_sd;
2413
2414 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2415 return -TARGET_EFAULT;
2416 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2417 return -TARGET_EFAULT;
2418 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2419 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2420 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2421 unlock_user_struct(target_sd, target_addr, 1);
2422 return 0;
2423 }
2424
2425 struct target_seminfo {
2426 int semmap;
2427 int semmni;
2428 int semmns;
2429 int semmnu;
2430 int semmsl;
2431 int semopm;
2432 int semume;
2433 int semusz;
2434 int semvmx;
2435 int semaem;
2436 };
2437
2438 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2439 struct seminfo *host_seminfo)
2440 {
2441 struct target_seminfo *target_seminfo;
2442 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2443 return -TARGET_EFAULT;
2444 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2445 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2446 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2447 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2448 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2449 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2450 __put_user(host_seminfo->semume, &target_seminfo->semume);
2451 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2452 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2453 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2454 unlock_user_struct(target_seminfo, target_addr, 1);
2455 return 0;
2456 }
2457
2458 union semun {
2459 int val;
2460 struct semid_ds *buf;
2461 unsigned short *array;
2462 struct seminfo *__buf;
2463 };
2464
2465 union target_semun {
2466 int val;
2467 abi_ulong buf;
2468 abi_ulong array;
2469 abi_ulong __buf;
2470 };
2471
2472 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2473 abi_ulong target_addr)
2474 {
2475 int nsems;
2476 unsigned short *array;
2477 union semun semun;
2478 struct semid_ds semid_ds;
2479 int i, ret;
2480
2481 semun.buf = &semid_ds;
2482
2483 ret = semctl(semid, 0, IPC_STAT, semun);
2484 if (ret == -1)
2485 return get_errno(ret);
2486
2487 nsems = semid_ds.sem_nsems;
2488
2489 *host_array = malloc(nsems*sizeof(unsigned short));
2490 array = lock_user(VERIFY_READ, target_addr,
2491 nsems*sizeof(unsigned short), 1);
2492 if (!array)
2493 return -TARGET_EFAULT;
2494
2495 for(i=0; i<nsems; i++) {
2496 __get_user((*host_array)[i], &array[i]);
2497 }
2498 unlock_user(array, target_addr, 0);
2499
2500 return 0;
2501 }
2502
2503 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2504 unsigned short **host_array)
2505 {
2506 int nsems;
2507 unsigned short *array;
2508 union semun semun;
2509 struct semid_ds semid_ds;
2510 int i, ret;
2511
2512 semun.buf = &semid_ds;
2513
2514 ret = semctl(semid, 0, IPC_STAT, semun);
2515 if (ret == -1)
2516 return get_errno(ret);
2517
2518 nsems = semid_ds.sem_nsems;
2519
2520 array = lock_user(VERIFY_WRITE, target_addr,
2521 nsems*sizeof(unsigned short), 0);
2522 if (!array)
2523 return -TARGET_EFAULT;
2524
2525 for(i=0; i<nsems; i++) {
2526 __put_user((*host_array)[i], &array[i]);
2527 }
2528 free(*host_array);
2529 unlock_user(array, target_addr, 1);
2530
2531 return 0;
2532 }
2533
2534 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2535 union target_semun target_su)
2536 {
2537 union semun arg;
2538 struct semid_ds dsarg;
2539 unsigned short *array = NULL;
2540 struct seminfo seminfo;
2541 abi_long ret = -TARGET_EINVAL;
2542 abi_long err;
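/* Guests may OR IPC_64 (0x100) into the command to request the 64-bit
 * structure layout; only the low byte selects the operation forwarded to
 * the host (do_msgctl() and do_shmctl() below mask the same way).
 */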
2543 cmd &= 0xff;
2544
2545 switch( cmd ) {
2546 case GETVAL:
2547 case SETVAL:
2548 arg.val = tswap32(target_su.val);
2549 ret = get_errno(semctl(semid, semnum, cmd, arg));
2550 target_su.val = tswap32(arg.val);
2551 break;
2552 case GETALL:
2553 case SETALL:
2554 err = target_to_host_semarray(semid, &array, target_su.array);
2555 if (err)
2556 return err;
2557 arg.array = array;
2558 ret = get_errno(semctl(semid, semnum, cmd, arg));
2559 err = host_to_target_semarray(semid, target_su.array, &array);
2560 if (err)
2561 return err;
2562 break;
2563 case IPC_STAT:
2564 case IPC_SET:
2565 case SEM_STAT:
2566 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2567 if (err)
2568 return err;
2569 arg.buf = &dsarg;
2570 ret = get_errno(semctl(semid, semnum, cmd, arg));
2571 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2572 if (err)
2573 return err;
2574 break;
2575 case IPC_INFO:
2576 case SEM_INFO:
2577 arg.__buf = &seminfo;
2578 ret = get_errno(semctl(semid, semnum, cmd, arg));
2579 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2580 if (err)
2581 return err;
2582 break;
2583 case IPC_RMID:
2584 case GETPID:
2585 case GETNCNT:
2586 case GETZCNT:
2587 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2588 break;
2589 }
2590
2591 return ret;
2592 }
2593
2594 struct target_sembuf {
2595 unsigned short sem_num;
2596 short sem_op;
2597 short sem_flg;
2598 };
2599
2600 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2601 abi_ulong target_addr,
2602 unsigned nsops)
2603 {
2604 struct target_sembuf *target_sembuf;
2605 int i;
2606
2607 target_sembuf = lock_user(VERIFY_READ, target_addr,
2608 nsops*sizeof(struct target_sembuf), 1);
2609 if (!target_sembuf)
2610 return -TARGET_EFAULT;
2611
2612 for(i=0; i<nsops; i++) {
2613 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2614 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2615 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2616 }
2617
2618 unlock_user(target_sembuf, target_addr, 0);
2619
2620 return 0;
2621 }
2622
2623 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2624 {
2625 struct sembuf sops[nsops];
2626
2627 if (target_to_host_sembuf(sops, ptr, nsops))
2628 return -TARGET_EFAULT;
2629
2630 return get_errno(semop(semid, sops, nsops));
2631 }
2632
2633 struct target_msqid_ds
2634 {
2635 struct target_ipc_perm msg_perm;
2636 abi_ulong msg_stime;
2637 #if TARGET_ABI_BITS == 32
2638 abi_ulong __unused1;
2639 #endif
2640 abi_ulong msg_rtime;
2641 #if TARGET_ABI_BITS == 32
2642 abi_ulong __unused2;
2643 #endif
2644 abi_ulong msg_ctime;
2645 #if TARGET_ABI_BITS == 32
2646 abi_ulong __unused3;
2647 #endif
2648 abi_ulong __msg_cbytes;
2649 abi_ulong msg_qnum;
2650 abi_ulong msg_qbytes;
2651 abi_ulong msg_lspid;
2652 abi_ulong msg_lrpid;
2653 abi_ulong __unused4;
2654 abi_ulong __unused5;
2655 };
2656
2657 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2658 abi_ulong target_addr)
2659 {
2660 struct target_msqid_ds *target_md;
2661
2662 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2663 return -TARGET_EFAULT;
2664 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2665 return -TARGET_EFAULT;
2666 host_md->msg_stime = tswapal(target_md->msg_stime);
2667 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2668 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2669 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2670 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2671 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2672 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2673 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2674 unlock_user_struct(target_md, target_addr, 0);
2675 return 0;
2676 }
2677
2678 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2679 struct msqid_ds *host_md)
2680 {
2681 struct target_msqid_ds *target_md;
2682
2683 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2684 return -TARGET_EFAULT;
2685 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2686 return -TARGET_EFAULT;
2687 target_md->msg_stime = tswapal(host_md->msg_stime);
2688 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2689 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2690 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2691 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2692 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2693 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2694 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2695 unlock_user_struct(target_md, target_addr, 1);
2696 return 0;
2697 }
2698
2699 struct target_msginfo {
2700 int msgpool;
2701 int msgmap;
2702 int msgmax;
2703 int msgmnb;
2704 int msgmni;
2705 int msgssz;
2706 int msgtql;
2707 unsigned short int msgseg;
2708 };
2709
2710 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2711 struct msginfo *host_msginfo)
2712 {
2713 struct target_msginfo *target_msginfo;
2714 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2715 return -TARGET_EFAULT;
2716 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2717 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2718 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2719 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2720 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2721 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2722 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2723 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2724 unlock_user_struct(target_msginfo, target_addr, 1);
2725 return 0;
2726 }
2727
2728 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2729 {
2730 struct msqid_ds dsarg;
2731 struct msginfo msginfo;
2732 abi_long ret = -TARGET_EINVAL;
2733
2734 cmd &= 0xff;
2735
2736 switch (cmd) {
2737 case IPC_STAT:
2738 case IPC_SET:
2739 case MSG_STAT:
2740 if (target_to_host_msqid_ds(&dsarg,ptr))
2741 return -TARGET_EFAULT;
2742 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2743 if (host_to_target_msqid_ds(ptr,&dsarg))
2744 return -TARGET_EFAULT;
2745 break;
2746 case IPC_RMID:
2747 ret = get_errno(msgctl(msgid, cmd, NULL));
2748 break;
2749 case IPC_INFO:
2750 case MSG_INFO:
2751 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2752 if (host_to_target_msginfo(ptr, &msginfo))
2753 return -TARGET_EFAULT;
2754 break;
2755 }
2756
2757 return ret;
2758 }
2759
2760 struct target_msgbuf {
2761 abi_long mtype;
2762 char mtext[1];
2763 };
2764
2765 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2766 unsigned int msgsz, int msgflg)
2767 {
2768 struct target_msgbuf *target_mb;
2769 struct msgbuf *host_mb;
2770 abi_long ret = 0;
2771
2772 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2773 return -TARGET_EFAULT;
2774 host_mb = malloc(msgsz+sizeof(long));
2775 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2776 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2777 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2778 free(host_mb);
2779 unlock_user_struct(target_mb, msgp, 0);
2780
2781 return ret;
2782 }
2783
2784 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2785 unsigned int msgsz, abi_long msgtyp,
2786 int msgflg)
2787 {
2788 struct target_msgbuf *target_mb;
2789 char *target_mtext;
2790 struct msgbuf *host_mb;
2791 abi_long ret = 0;
2792
2793 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2794 return -TARGET_EFAULT;
2795
2796 host_mb = malloc(msgsz+sizeof(long));
2797 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
2798
2799 if (ret > 0) {
2800 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2801 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2802 if (!target_mtext) {
2803 ret = -TARGET_EFAULT;
2804 goto end;
2805 }
2806 memcpy(target_mb->mtext, host_mb->mtext, ret);
2807 unlock_user(target_mtext, target_mtext_addr, ret);
2808 }
2809
2810 target_mb->mtype = tswapal(host_mb->mtype);
2811 free(host_mb);
2812
2813 end:
2814 if (target_mb)
2815 unlock_user_struct(target_mb, msgp, 1);
2816 return ret;
2817 }
2818
2819 struct target_shmid_ds
2820 {
2821 struct target_ipc_perm shm_perm;
2822 abi_ulong shm_segsz;
2823 abi_ulong shm_atime;
2824 #if TARGET_ABI_BITS == 32
2825 abi_ulong __unused1;
2826 #endif
2827 abi_ulong shm_dtime;
2828 #if TARGET_ABI_BITS == 32
2829 abi_ulong __unused2;
2830 #endif
2831 abi_ulong shm_ctime;
2832 #if TARGET_ABI_BITS == 32
2833 abi_ulong __unused3;
2834 #endif
2835 int shm_cpid;
2836 int shm_lpid;
2837 abi_ulong shm_nattch;
2838 unsigned long int __unused4;
2839 unsigned long int __unused5;
2840 };
2841
2842 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2843 abi_ulong target_addr)
2844 {
2845 struct target_shmid_ds *target_sd;
2846
2847 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2848 return -TARGET_EFAULT;
2849 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2850 return -TARGET_EFAULT;
2851 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2852 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2853 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2854 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2855 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2856 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2857 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2858 unlock_user_struct(target_sd, target_addr, 0);
2859 return 0;
2860 }
2861
2862 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2863 struct shmid_ds *host_sd)
2864 {
2865 struct target_shmid_ds *target_sd;
2866
2867 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2868 return -TARGET_EFAULT;
2869 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2870 return -TARGET_EFAULT;
2871 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2872 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2873 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2874 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2875 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2876 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2877 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2878 unlock_user_struct(target_sd, target_addr, 1);
2879 return 0;
2880 }
2881
2882 struct target_shminfo {
2883 abi_ulong shmmax;
2884 abi_ulong shmmin;
2885 abi_ulong shmmni;
2886 abi_ulong shmseg;
2887 abi_ulong shmall;
2888 };
2889
2890 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2891 struct shminfo *host_shminfo)
2892 {
2893 struct target_shminfo *target_shminfo;
2894 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2895 return -TARGET_EFAULT;
2896 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2897 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2898 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2899 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2900 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2901 unlock_user_struct(target_shminfo, target_addr, 1);
2902 return 0;
2903 }
2904
2905 struct target_shm_info {
2906 int used_ids;
2907 abi_ulong shm_tot;
2908 abi_ulong shm_rss;
2909 abi_ulong shm_swp;
2910 abi_ulong swap_attempts;
2911 abi_ulong swap_successes;
2912 };
2913
2914 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2915 struct shm_info *host_shm_info)
2916 {
2917 struct target_shm_info *target_shm_info;
2918 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2919 return -TARGET_EFAULT;
2920 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2921 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2922 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2923 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2924 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2925 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2926 unlock_user_struct(target_shm_info, target_addr, 1);
2927 return 0;
2928 }
2929
2930 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2931 {
2932 struct shmid_ds dsarg;
2933 struct shminfo shminfo;
2934 struct shm_info shm_info;
2935 abi_long ret = -TARGET_EINVAL;
2936
2937 cmd &= 0xff;
2938
2939 switch(cmd) {
2940 case IPC_STAT:
2941 case IPC_SET:
2942 case SHM_STAT:
2943 if (target_to_host_shmid_ds(&dsarg, buf))
2944 return -TARGET_EFAULT;
2945 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2946 if (host_to_target_shmid_ds(buf, &dsarg))
2947 return -TARGET_EFAULT;
2948 break;
2949 case IPC_INFO:
2950 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2951 if (host_to_target_shminfo(buf, &shminfo))
2952 return -TARGET_EFAULT;
2953 break;
2954 case SHM_INFO:
2955 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2956 if (host_to_target_shm_info(buf, &shm_info))
2957 return -TARGET_EFAULT;
2958 break;
2959 case IPC_RMID:
2960 case SHM_LOCK:
2961 case SHM_UNLOCK:
2962 ret = get_errno(shmctl(shmid, cmd, NULL));
2963 break;
2964 }
2965
2966 return ret;
2967 }
2968
2969 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2970 {
2971 abi_long raddr;
2972 void *host_raddr;
2973 struct shmid_ds shm_info;
2974 int i,ret;
2975
2976 /* find out the length of the shared memory segment */
2977 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2978 if (is_error(ret)) {
2979 /* can't get length, bail out */
2980 return ret;
2981 }
2982
2983 mmap_lock();
2984
2985 if (shmaddr)
2986 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2987 else {
2988 abi_ulong mmap_start;
2989
2990 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2991
2992 if (mmap_start == -1) {
2993 errno = ENOMEM;
2994 host_raddr = (void *)-1;
2995 } else
2996 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2997 }
2998
2999 if (host_raddr == (void *)-1) {
3000 mmap_unlock();
3001 return get_errno((long)host_raddr);
3002 }
3003 raddr=h2g((unsigned long)host_raddr);
3004
3005 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3006 PAGE_VALID | PAGE_READ |
3007 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3008
3009 for (i = 0; i < N_SHM_REGIONS; i++) {
3010 if (shm_regions[i].start == 0) {
3011 shm_regions[i].start = raddr;
3012 shm_regions[i].size = shm_info.shm_segsz;
3013 break;
3014 }
3015 }
3016
3017 mmap_unlock();
3018 return raddr;
3019
3020 }
3021
3022 static inline abi_long do_shmdt(abi_ulong shmaddr)
3023 {
3024 int i;
3025
3026 for (i = 0; i < N_SHM_REGIONS; ++i) {
3027 if (shm_regions[i].start == shmaddr) {
3028 shm_regions[i].start = 0;
3029 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3030 break;
3031 }
3032 }
3033
3034 return get_errno(shmdt(g2h(shmaddr)));
3035 }
3036
3037 #ifdef TARGET_NR_ipc
3038 /* ??? This only works with linear mappings. */
3039 /* do_ipc() must return target values and target errnos. */
3040 static abi_long do_ipc(unsigned int call, int first,
3041 int second, int third,
3042 abi_long ptr, abi_long fifth)
3043 {
3044 int version;
3045 abi_long ret = 0;
3046
3047 version = call >> 16;
3048 call &= 0xffff;
3049
3050 switch (call) {
3051 case IPCOP_semop:
3052 ret = do_semop(first, ptr, second);
3053 break;
3054
3055 case IPCOP_semget:
3056 ret = get_errno(semget(first, second, third));
3057 break;
3058
3059 case IPCOP_semctl:
3060 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3061 break;
3062
3063 case IPCOP_msgget:
3064 ret = get_errno(msgget(first, second));
3065 break;
3066
3067 case IPCOP_msgsnd:
3068 ret = do_msgsnd(first, ptr, second, third);
3069 break;
3070
3071 case IPCOP_msgctl:
3072 ret = do_msgctl(first, second, ptr);
3073 break;
3074
3075 case IPCOP_msgrcv:
3076 switch (version) {
3077 case 0:
3078 {
3079 struct target_ipc_kludge {
3080 abi_long msgp;
3081 abi_long msgtyp;
3082 } *tmp;
3083
3084 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3085 ret = -TARGET_EFAULT;
3086 break;
3087 }
3088
3089 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3090
3091 unlock_user_struct(tmp, ptr, 0);
3092 break;
3093 }
3094 default:
3095 ret = do_msgrcv(first, ptr, second, fifth, third);
3096 }
3097 break;
3098
3099 case IPCOP_shmat:
3100 switch (version) {
3101 default:
3102 {
3103 abi_ulong raddr;
3104 raddr = do_shmat(first, ptr, second);
3105 if (is_error(raddr))
3106 return get_errno(raddr);
3107 if (put_user_ual(raddr, third))
3108 return -TARGET_EFAULT;
3109 break;
3110 }
3111 case 1:
3112 ret = -TARGET_EINVAL;
3113 break;
3114 }
3115 break;
3116 case IPCOP_shmdt:
3117 ret = do_shmdt(ptr);
3118 break;
3119
3120 case IPCOP_shmget:
3121 /* IPC_* flag values are the same on all linux platforms */
3122 ret = get_errno(shmget(first, second, third));
3123 break;
3124
3125 /* IPC_* and SHM_* command values are the same on all linux platforms */
3126 case IPCOP_shmctl:
3127 ret = do_shmctl(first, second, third);
3128 break;
3129 default:
3130 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3131 ret = -TARGET_ENOSYS;
3132 break;
3133 }
3134 return ret;
3135 }
3136 #endif
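/* Hypothetical guest-side view (illustration only, not part of QEMU):
 * targets that funnel SysV IPC through the single ipc(2) syscall encode
 * the operation in the low 16 bits of 'call' and a version number in the
 * upper bits, which do_ipc() above splits apart again.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>

static int guest_semget(int key, int nsems, int semflg)
{
    /* 2 == SEMGET in the kernel's ipc multiplexer (version 0) */
    return syscall(__NR_ipc, 2, key, nsems, semflg, (void *)0);
}
#endif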
3137
3138 /* kernel structure types definitions */
3139
3140 #define STRUCT(name, ...) STRUCT_ ## name,
3141 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3142 enum {
3143 #include "syscall_types.h"
3144 };
3145 #undef STRUCT
3146 #undef STRUCT_SPECIAL
3147
3148 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3149 #define STRUCT_SPECIAL(name)
3150 #include "syscall_types.h"
3151 #undef STRUCT
3152 #undef STRUCT_SPECIAL
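/* How the two-pass include above expands (illustration; the real entries
 * live in syscall_types.h).  A line such as
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * first becomes the enum constant STRUCT_winsize, and on the second pass
 *
 *     static const argtype struct_winsize_def[] = {
 *         TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL
 *     };
 *
 * so that MK_STRUCT(STRUCT_winsize) can refer to the layout by index.
 */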
3153
3154 typedef struct IOCTLEntry IOCTLEntry;
3155
3156 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3157 int fd, abi_long cmd, abi_long arg);
3158
3159 struct IOCTLEntry {
3160 unsigned int target_cmd;
3161 unsigned int host_cmd;
3162 const char *name;
3163 int access;
3164 do_ioctl_fn *do_ioctl;
3165 const argtype arg_type[5];
3166 };
3167
3168 #define IOC_R 0x0001
3169 #define IOC_W 0x0002
3170 #define IOC_RW (IOC_R | IOC_W)
3171
3172 #define MAX_STRUCT_SIZE 4096
3173
3174 #ifdef CONFIG_FIEMAP
3175 /* So fiemap access checks don't overflow on 32 bit systems.
3176 * This is very slightly smaller than the limit imposed by
3177 * the underlying kernel.
3178 */
3179 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3180 / sizeof(struct fiemap_extent))
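/* In other words, fm_extent_count is capped so that the buffer size
 * computed below -- sizeof(struct fiemap) plus fm_extent_count times
 * sizeof(struct fiemap_extent) -- cannot wrap a 32-bit unsigned value.
 */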
3181
3182 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3183 int fd, abi_long cmd, abi_long arg)
3184 {
3185 /* The parameter for this ioctl is a struct fiemap followed
3186 * by an array of struct fiemap_extent whose size is set
3187 * in fiemap->fm_extent_count. The array is filled in by the
3188 * ioctl.
3189 */
3190 int target_size_in, target_size_out;
3191 struct fiemap *fm;
3192 const argtype *arg_type = ie->arg_type;
3193 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3194 void *argptr, *p;
3195 abi_long ret;
3196 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3197 uint32_t outbufsz;
3198 int free_fm = 0;
3199
3200 assert(arg_type[0] == TYPE_PTR);
3201 assert(ie->access == IOC_RW);
3202 arg_type++;
3203 target_size_in = thunk_type_size(arg_type, 0);
3204 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3205 if (!argptr) {
3206 return -TARGET_EFAULT;
3207 }
3208 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3209 unlock_user(argptr, arg, 0);
3210 fm = (struct fiemap *)buf_temp;
3211 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3212 return -TARGET_EINVAL;
3213 }
3214
3215 outbufsz = sizeof (*fm) +
3216 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3217
3218 if (outbufsz > MAX_STRUCT_SIZE) {
3219 /* We can't fit all the extents into the fixed size buffer.
3220 * Allocate one that is large enough and use it instead.
3221 */
3222 fm = malloc(outbufsz);
3223 if (!fm) {
3224 return -TARGET_ENOMEM;
3225 }
3226 memcpy(fm, buf_temp, sizeof(struct fiemap));
3227 free_fm = 1;
3228 }
3229 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3230 if (!is_error(ret)) {
3231 target_size_out = target_size_in;
3232 /* An extent_count of 0 means we were only counting the extents
3233 * so there are no structs to copy
3234 */
3235 if (fm->fm_extent_count != 0) {
3236 target_size_out += fm->fm_mapped_extents * extent_size;
3237 }
3238 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3239 if (!argptr) {
3240 ret = -TARGET_EFAULT;
3241 } else {
3242 /* Convert the struct fiemap */
3243 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3244 if (fm->fm_extent_count != 0) {
3245 p = argptr + target_size_in;
3246 /* ...and then all the struct fiemap_extents */
3247 for (i = 0; i < fm->fm_mapped_extents; i++) {
3248 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3249 THUNK_TARGET);
3250 p += extent_size;
3251 }
3252 }
3253 unlock_user(argptr, arg, target_size_out);
3254 }
3255 }
3256 if (free_fm) {
3257 free(fm);
3258 }
3259 return ret;
3260 }
3261 #endif
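/* Illustrative caller-side sketch of FS_IOC_FIEMAP (not built into QEMU):
 * the request buffer is one struct fiemap immediately followed by
 * fm_extent_count struct fiemap_extent slots for the kernel to fill in,
 * which is exactly the layout do_ioctl_fs_ioc_fiemap() converts above.
 */
#if 0
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

static struct fiemap *query_extents(int fd, unsigned int max_extents)
{
    size_t sz = sizeof(struct fiemap)
                + max_extents * sizeof(struct fiemap_extent);
    struct fiemap *fm = calloc(1, sz);

    if (!fm) {
        return NULL;
    }
    fm->fm_length = FIEMAP_MAX_OFFSET;  /* map the whole file */
    fm->fm_extent_count = max_extents;  /* slots provided after the header */
    if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
        free(fm);
        return NULL;
    }
    /* the first fm->fm_mapped_extents entries of fm->fm_extents[] are valid */
    return fm;
}
#endif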
3262
3263 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3264 int fd, abi_long cmd, abi_long arg)
3265 {
3266 const argtype *arg_type = ie->arg_type;
3267 int target_size;
3268 void *argptr;
3269 int ret;
3270 struct ifconf *host_ifconf;
3271 uint32_t outbufsz;
3272 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3273 int target_ifreq_size;
3274 int nb_ifreq;
3275 int free_buf = 0;
3276 int i;
3277 int target_ifc_len;
3278 abi_long target_ifc_buf;
3279 int host_ifc_len;
3280 char *host_ifc_buf;
3281
3282 assert(arg_type[0] == TYPE_PTR);
3283 assert(ie->access == IOC_RW);
3284
3285 arg_type++;
3286 target_size = thunk_type_size(arg_type, 0);
3287
3288 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3289 if (!argptr)
3290 return -TARGET_EFAULT;
3291 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3292 unlock_user(argptr, arg, 0);
3293
3294 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3295 target_ifc_len = host_ifconf->ifc_len;
3296 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3297
3298 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3299 nb_ifreq = target_ifc_len / target_ifreq_size;
3300 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3301
3302 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3303 if (outbufsz > MAX_STRUCT_SIZE) {
3304 /* We can't fit all the ifreq entries into the fixed size buffer.
3305 * Allocate one that is large enough and use it instead.
3306 */
3307 host_ifconf = malloc(outbufsz);
3308 if (!host_ifconf) {
3309 return -TARGET_ENOMEM;
3310 }
3311 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3312 free_buf = 1;
3313 }
3314 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3315
3316 host_ifconf->ifc_len = host_ifc_len;
3317 host_ifconf->ifc_buf = host_ifc_buf;
3318
3319 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3320 if (!is_error(ret)) {
3321 /* convert host ifc_len to target ifc_len */
3322
3323 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3324 target_ifc_len = nb_ifreq * target_ifreq_size;
3325 host_ifconf->ifc_len = target_ifc_len;
3326
3327 /* restore target ifc_buf */
3328
3329 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3330
3331 /* copy struct ifconf to target user */
3332
3333 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3334 if (!argptr)
3335 return -TARGET_EFAULT;
3336 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3337 unlock_user(argptr, arg, target_size);
3338
3339 /* copy ifreq[] to target user */
3340
3341 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3342 for (i = 0; i < nb_ifreq ; i++) {
3343 thunk_convert(argptr + i * target_ifreq_size,
3344 host_ifc_buf + i * sizeof(struct ifreq),
3345 ifreq_arg_type, THUNK_TARGET);
3346 }
3347 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3348 }
3349
3350 if (free_buf) {
3351 free(host_ifconf);
3352 }
3353
3354 return ret;
3355 }
3356
3357 static IOCTLEntry ioctl_entries[] = {
3358 #define IOCTL(cmd, access, ...) \
3359 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3360 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3361 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3362 #include "ioctls.h"
3363 { 0, 0, },
3364 };
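/* Illustrative expansion (the real list comes from ioctls.h): an entry
 * declared there as, say,
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * becomes
 *
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 *
 * which gives do_ioctl() below the host command number, the direction of
 * the data transfer and the argtype description used for the conversion.
 */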
3365
3366 /* ??? Implement proper locking for ioctls. */
3367 /* do_ioctl() Must return target values and target errnos. */
3368 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3369 {
3370 const IOCTLEntry *ie;
3371 const argtype *arg_type;
3372 abi_long ret;
3373 uint8_t buf_temp[MAX_STRUCT_SIZE];
3374 int target_size;
3375 void *argptr;
3376
3377 ie = ioctl_entries;
3378 for(;;) {
3379 if (ie->target_cmd == 0) {
3380 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3381 return -TARGET_ENOSYS;
3382 }
3383 if (ie->target_cmd == cmd)
3384 break;
3385 ie++;
3386 }
3387 arg_type = ie->arg_type;
3388 #if defined(DEBUG)
3389 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3390 #endif
3391 if (ie->do_ioctl) {
3392 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3393 }
3394
3395 switch(arg_type[0]) {
3396 case TYPE_NULL:
3397 /* no argument */
3398 ret = get_errno(ioctl(fd, ie->host_cmd));
3399 break;
3400 case TYPE_PTRVOID:
3401 case TYPE_INT:
3402 /* int argument */
3403 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3404 break;
3405 case TYPE_PTR:
3406 arg_type++;
3407 target_size = thunk_type_size(arg_type, 0);
3408 switch(ie->access) {
3409 case IOC_R:
3410 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3411 if (!is_error(ret)) {
3412 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3413 if (!argptr)
3414 return -TARGET_EFAULT;
3415 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3416 unlock_user(argptr, arg, target_size);
3417 }
3418 break;
3419 case IOC_W:
3420 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3421 if (!argptr)
3422 return -TARGET_EFAULT;
3423 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3424 unlock_user(argptr, arg, 0);
3425 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3426 break;
3427 default:
3428 case IOC_RW:
3429 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3430 if (!argptr)
3431 return -TARGET_EFAULT;
3432 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3433 unlock_user(argptr, arg, 0);
3434 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3435 if (!is_error(ret)) {
3436 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3437 if (!argptr)
3438 return -TARGET_EFAULT;
3439 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3440 unlock_user(argptr, arg, target_size);
3441 }
3442 break;
3443 }
3444 break;
3445 default:
3446 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3447 (long)cmd, arg_type[0]);
3448 ret = -TARGET_ENOSYS;
3449 break;
3450 }
3451 return ret;
3452 }
3453
3454 static const bitmask_transtbl iflag_tbl[] = {
3455 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3456 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3457 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3458 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3459 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3460 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3461 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3462 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3463 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3464 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3465 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3466 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3467 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3468 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3469 { 0, 0, 0, 0 }
3470 };
3471
3472 static const bitmask_transtbl oflag_tbl[] = {
3473 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3474 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3475 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3476 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3477 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3478 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3479 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3480 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3481 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3482 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3483 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3484 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3485 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3486 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3487 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3488 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3489 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3490 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3491 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3492 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3493 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3494 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3495 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3496 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3497 { 0, 0, 0, 0 }
3498 };
3499
3500 static const bitmask_transtbl cflag_tbl[] = {
3501 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3502 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3503 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3504 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3505 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3506 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3507 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3508 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3509 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3510 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3511 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3512 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3513 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3514 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3515 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3516 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3517 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3518 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3519 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3520 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3521 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3522 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3523 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3524 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3525 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3526 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3527 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3528 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3529 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3530 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3531 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3532 { 0, 0, 0, 0 }
3533 };
3534
3535 static const bitmask_transtbl lflag_tbl[] = {
3536 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3537 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3538 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3539 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3540 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3541 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3542 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3543 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3544 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3545 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3546 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3547 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3548 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3549 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3550 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3551 { 0, 0, 0, 0 }
3552 };
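/* Each row of these tables pairs a target mask/value with the matching
 * host mask/value, so multi-bit fields such as TARGET_CBAUD or
 * TARGET_CSIZE translate correctly even when the bit layouts differ.
 * A minimal sketch of the lookup (the real helpers are
 * target_to_host_bitmask()/host_to_target_bitmask(), defined elsewhere;
 * the member names used below are assumptions about the bitmask_transtbl
 * definition, which also lives elsewhere):
 */
#if 0
static unsigned int sketch_target_to_host_bitmask(unsigned int target_flags,
                                                  const bitmask_transtbl *tbl)
{
    unsigned int host_flags = 0;

    /* walk the table until the terminating all-zero row */
    for (; tbl->x86_mask && tbl->alpha_mask; tbl++) {
        if ((target_flags & tbl->x86_mask) == tbl->x86_bits) {
            host_flags |= tbl->alpha_bits;
        }
    }
    return host_flags;
}
#endif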
3553
3554 static void target_to_host_termios (void *dst, const void *src)
3555 {
3556 struct host_termios *host = dst;
3557 const struct target_termios *target = src;
3558
3559 host->c_iflag =
3560 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3561 host->c_oflag =
3562 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3563 host->c_cflag =
3564 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3565 host->c_lflag =
3566 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3567 host->c_line = target->c_line;
3568
3569 memset(host->c_cc, 0, sizeof(host->c_cc));
3570 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3571 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3572 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3573 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3574 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3575 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3576 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3577 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3578 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3579 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3580 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3581 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3582 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3583 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3584 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3585 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3586 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3587 }
3588
3589 static void host_to_target_termios (void *dst, const void *src)
3590 {
3591 struct target_termios *target = dst;
3592 const struct host_termios *host = src;
3593
3594 target->c_iflag =
3595 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3596 target->c_oflag =
3597 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3598 target->c_cflag =
3599 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3600 target->c_lflag =
3601 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3602 target->c_line = host->c_line;
3603
3604 memset(target->c_cc, 0, sizeof(target->c_cc));
3605 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3606 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3607 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3608 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3609 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3610 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3611 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3612 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3613 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3614 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3615 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3616 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3617 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3618 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3619 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3620 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3621 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3622 }
3623
3624 static const StructEntry struct_termios_def = {
3625 .convert = { host_to_target_termios, target_to_host_termios },
3626 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3627 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3628 };
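/* termios cannot be described by a flat argtype list (the c_cc[] indices
 * and flag encodings differ between targets), so it is presumably declared
 * STRUCT_SPECIAL in syscall_types.h and converted through the two
 * hand-written routines referenced by this StructEntry.
 */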
3629
3630 static bitmask_transtbl mmap_flags_tbl[] = {
3631 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3632 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3633 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3634 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3635 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3636 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3637 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3638 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3639 { 0, 0, 0, 0 }
3640 };
3641
3642 #if defined(TARGET_I386)
3643
3644 /* NOTE: there is really only one LDT, shared by all the threads */
3645 static uint8_t *ldt_table;
3646
3647 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3648 {
3649 int size;
3650 void *p;
3651
3652 if (!ldt_table)
3653 return 0;
3654 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3655 if (size > bytecount)
3656 size = bytecount;
3657 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3658 if (!p)
3659 return -TARGET_EFAULT;
3660 /* ??? Should this be byteswapped? */
3661 memcpy(p, ldt_table, size);
3662 unlock_user(p, ptr, size);
3663 return size;
3664 }
3665
3666 /* XXX: add locking support */
3667 static abi_long write_ldt(CPUX86State *env,
3668 abi_ulong ptr, unsigned long bytecount, int oldmode)
3669 {
3670 struct target_modify_ldt_ldt_s ldt_info;
3671 struct target_modify_ldt_ldt_s *target_ldt_info;
3672 int seg_32bit, contents, read_exec_only, limit_in_pages;
3673 int seg_not_present, useable, lm;
3674 uint32_t *lp, entry_1, entry_2;
3675
3676 if (bytecount != sizeof(ldt_info))
3677 return -TARGET_EINVAL;
3678 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3679 return -TARGET_EFAULT;
3680 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3681 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3682 ldt_info.limit = tswap32(target_ldt_info->limit);
3683 ldt_info.flags = tswap32(target_ldt_info->flags);
3684 unlock_user_struct(target_ldt_info, ptr, 0);
3685
3686 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3687 return -TARGET_EINVAL;
3688 seg_32bit = ldt_info.flags & 1;
3689 contents = (ldt_info.flags >> 1) & 3;
3690 read_exec_only = (ldt_info.flags >> 3) & 1;
3691 limit_in_pages = (ldt_info.flags >> 4) & 1;
3692 seg_not_present = (ldt_info.flags >> 5) & 1;
3693 useable = (ldt_info.flags >> 6) & 1;
3694 #ifdef TARGET_ABI32
3695 lm = 0;
3696 #else
3697 lm = (ldt_info.flags >> 7) & 1;
3698 #endif
3699 if (contents == 3) {
3700 if (oldmode)
3701 return -TARGET_EINVAL;
3702 if (seg_not_present == 0)
3703 return -TARGET_EINVAL;
3704 }
3705 /* allocate the LDT */
3706 if (!ldt_table) {
3707 env->ldt.base = target_mmap(0,
3708 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3709 PROT_READ|PROT_WRITE,
3710 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3711 if (env->ldt.base == -1)
3712 return -TARGET_ENOMEM;
3713 memset(g2h(env->ldt.base), 0,
3714 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3715 env->ldt.limit = 0xffff;
3716 ldt_table = g2h(env->ldt.base);
3717 }
3718
3719 /* NOTE: same code as Linux kernel */
3720 /* Allow LDTs to be cleared by the user. */
3721 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3722 if (oldmode ||
3723 (contents == 0 &&
3724 read_exec_only == 1 &&
3725 seg_32bit == 0 &&
3726 limit_in_pages == 0 &&
3727 seg_not_present == 1 &&
3728 useable == 0 )) {
3729 entry_1 = 0;
3730 entry_2 = 0;
3731 goto install;
3732 }
3733 }
3734
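/* Pack the fields into the two 32-bit words of an x86 segment descriptor:
 * entry_1 carries base[15:0] and limit[15:0]; entry_2 carries base[31:24],
 * base[23:16], limit[19:16] plus the type, present, DPL and size bits --
 * the same packing the kernel applies for modify_ldt().
 */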
3735 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3736 (ldt_info.limit & 0x0ffff);
3737 entry_2 = (ldt_info.base_addr & 0xff000000) |
3738 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3739 (ldt_info.limit & 0xf0000) |
3740 ((read_exec_only ^ 1) << 9) |
3741 (contents << 10) |
3742 ((seg_not_present ^ 1) << 15) |
3743 (seg_32bit << 22) |
3744 (limit_in_pages << 23) |
3745 (lm << 21) |
3746 0x7000;
3747 if (!oldmode)
3748 entry_2 |= (useable << 20);
3749
3750 /* Install the new entry ... */
3751 install:
3752 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3753 lp[0] = tswap32(entry_1);
3754 lp[1] = tswap32(entry_2);
3755 return 0;
3756 }
3757
3758 /* specific and weird i386 syscalls */
3759 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3760 unsigned long bytecount)
3761 {
3762 abi_long ret;
3763
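/* func selects the sub-operation, mirroring the kernel's modify_ldt():
   0 = read the LDT, 1 = write an entry in the legacy format,
   0x11 = write an entry in the current format. */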
3764 switch (func) {
3765 case 0:
3766 ret = read_ldt(ptr, bytecount);
3767 break;
3768 case 1:
3769 ret = write_ldt(env, ptr, bytecount, 1);
3770 break;
3771 case 0x11:
3772 ret = write_ldt(env, ptr, bytecount, 0);
3773 break;
3774 default:
3775 ret = -TARGET_ENOSYS;
3776 break;
3777 }
3778 return ret;
3779 }
3780
3781 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3782 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3783 {
3784 uint64_t *gdt_table = g2h(env->gdt.base);
3785 struct target_modify_ldt_ldt_s ldt_info;
3786 struct target_modify_ldt_ldt_s *target_ldt_info;
3787 int seg_32bit, contents, read_exec_only, limit_in_pages;
3788 int seg_not_present, useable, lm;
3789 uint32_t *lp, entry_1, entry_2;
3790 int i;
3791
3792 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3793 if (!target_ldt_info)
3794 return -TARGET_EFAULT;
3795 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3796 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3797 ldt_info.limit = tswap32(target_ldt_info->limit);
3798 ldt_info.flags = tswap32(target_ldt_info->flags);
3799 if (ldt_info.entry_number == -1) {
3800 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3801 if (gdt_table[i] == 0) {
3802 ldt_info.entry_number = i;
3803 target_ldt_info->entry_number = tswap32(i);
3804 break;
3805 }
3806 }
3807 }
3808 unlock_user_struct(target_ldt_info, ptr, 1);
3809
3810 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3811 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3812 return -TARGET_EINVAL;
3813 seg_32bit = ldt_info.flags & 1;
3814 contents = (ldt_info.flags >> 1) & 3;
3815 read_exec_only = (ldt_info.flags >> 3) & 1;
3816 limit_in_pages = (ldt_info.flags >> 4) & 1;
3817 seg_not_present = (ldt_info.flags >> 5) & 1;
3818 useable = (ldt_info.flags >> 6) & 1;
3819 #ifdef TARGET_ABI32
3820 lm = 0;
3821 #else
3822 lm = (ldt_info.flags >> 7) & 1;
3823 #endif
3824
3825 if (contents == 3) {
3826 if (seg_not_present == 0)
3827 return -TARGET_EINVAL;
3828 }
3829
3830 /* NOTE: same code as Linux kernel */
3831 /* Allow LDTs to be cleared by the user. */
3832 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3833 if ((contents == 0 &&
3834 read_exec_only == 1 &&
3835 seg_32bit == 0 &&
3836 limit_in_pages == 0 &&
3837 seg_not_present == 1 &&
3838 useable == 0 )) {
3839 entry_1 = 0;
3840 entry_2 = 0;
3841 goto install;
3842 }
3843 }
3844
3845 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3846 (ldt_info.limit & 0x0ffff);
3847 entry_2 = (ldt_info.base_addr & 0xff000000) |
3848 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3849 (ldt_info.limit & 0xf0000) |
3850 ((read_exec_only ^ 1) << 9) |
3851 (contents << 10) |
3852 ((seg_not_present ^ 1) << 15) |
3853 (seg_32bit << 22) |
3854 (limit_in_pages << 23) |
3855 (useable << 20) |
3856 (lm << 21) |
3857 0x7000;
3858
3859 /* Install the new entry ... */
3860 install:
3861 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3862 lp[0] = tswap32(entry_1);
3863 lp[1] = tswap32(entry_2);
3864 return 0;
3865 }
3866
3867 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3868 {
3869 struct target_modify_ldt_ldt_s *target_ldt_info;
3870 uint64_t *gdt_table = g2h(env->gdt.base);
3871 uint32_t base_addr, limit, flags;
3872 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3873 int seg_not_present, useable, lm;
3874 uint32_t *lp, entry_1, entry_2;
3875
3876 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3877 if (!target_ldt_info)
3878 return -TARGET_EFAULT;
3879 idx = tswap32(target_ldt_info->entry_number);
3880 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3881 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3882 unlock_user_struct(target_ldt_info, ptr, 1);
3883 return -TARGET_EINVAL;
3884 }
3885 lp = (uint32_t *)(gdt_table + idx);
3886 entry_1 = tswap32(lp[0]);
3887 entry_2 = tswap32(lp[1]);
3888
3889 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3890 contents = (entry_2 >> 10) & 3;
3891 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3892 seg_32bit = (entry_2 >> 22) & 1;
3893 limit_in_pages = (entry_2 >> 23) & 1;
3894 useable = (entry_2 >> 20) & 1;
3895 #ifdef TARGET_ABI32
3896 lm = 0;
3897 #else
3898 lm = (entry_2 >> 21) & 1;
3899 #endif
3900 flags = (seg_32bit << 0) | (contents << 1) |
3901 (read_exec_only << 3) | (limit_in_pages << 4) |
3902 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3903 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3904 base_addr = (entry_1 >> 16) |
3905 (entry_2 & 0xff000000) |
3906 ((entry_2 & 0xff) << 16);
3907 target_ldt_info->base_addr = tswapal(base_addr);
3908 target_ldt_info->limit = tswap32(limit);
3909 target_ldt_info->flags = tswap32(flags);
3910 unlock_user_struct(target_ldt_info, ptr, 1);
3911 return 0;
3912 }
3913 #endif /* TARGET_I386 && TARGET_ABI32 */
3914
3915 #ifndef TARGET_ABI32
3916 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3917 {
3918 abi_long ret = 0;
3919 abi_ulong val;
3920 int idx;
3921
3922 switch(code) {
3923 case TARGET_ARCH_SET_GS:
3924 case TARGET_ARCH_SET_FS:
3925 if (code == TARGET_ARCH_SET_GS)
3926 idx = R_GS;
3927 else
3928 idx = R_FS;
3929 cpu_x86_load_seg(env, idx, 0);
3930 env->segs[idx].base = addr;
3931 break;
3932 case TARGET_ARCH_GET_GS:
3933 case TARGET_ARCH_GET_FS:
3934 if (code == TARGET_ARCH_GET_GS)
3935 idx = R_GS;
3936 else
3937 idx = R_FS;
3938 val = env->segs[idx].base;
3939 if (put_user(val, addr, abi_ulong))
3940 ret = -TARGET_EFAULT;
3941 break;
3942 default:
3943 ret = -TARGET_EINVAL;
3944 break;
3945 }
3946 return ret;
3947 }
3948 #endif
3949
3950 #endif /* defined(TARGET_I386) */
3951
3952 #define NEW_STACK_SIZE 0x40000
3953
3954 #if defined(CONFIG_USE_NPTL)
3955
3956 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3957 typedef struct {
3958 CPUArchState *env;
3959 pthread_mutex_t mutex;
3960 pthread_cond_t cond;
3961 pthread_t thread;
3962 uint32_t tid;
3963 abi_ulong child_tidptr;
3964 abi_ulong parent_tidptr;
3965 sigset_t sigmask;
3966 } new_thread_info;
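/* Start-up parameters handed from do_fork() to clone_func(): the parent
   fills in env and the tid pointers, the child reports its tid back and
   signals the condition variable once it is ready to run. */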
3967
3968 static void *clone_func(void *arg)
3969 {
3970 new_thread_info *info = arg;
3971 CPUArchState *env;
3972 TaskState *ts;
3973
3974 env = info->env;
3975 thread_env = env;
3976 ts = (TaskState *)thread_env->opaque;
3977 info->tid = gettid();
3978 env->host_tid = info->tid;
3979 task_settid(ts);
3980 if (info->child_tidptr)
3981 put_user_u32(info->tid, info->child_tidptr);
3982 if (info->parent_tidptr)
3983 put_user_u32(info->tid, info->parent_tidptr);
3984 /* Enable signals. */
3985 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3986 /* Signal to the parent that we're ready. */
3987 pthread_mutex_lock(&info->mutex);
3988 pthread_cond_broadcast(&info->cond);
3989 pthread_mutex_unlock(&info->mutex);
3990 /* Wait until the parent has finished initializing the TLS state. */
3991 pthread_mutex_lock(&clone_lock);
3992 pthread_mutex_unlock(&clone_lock);
3993 cpu_loop(env);
3994 /* never exits */
3995 return NULL;
3996 }
3997 #else
3998
3999 static int clone_func(void *arg)
4000 {
4001 CPUArchState *env = arg;
4002 cpu_loop(env);
4003 /* never exits */
4004 return 0;
4005 }
4006 #endif
4007
4008 /* do_fork() must return host values and target errnos (unlike most
4009 do_*() functions). */
4010 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4011 abi_ulong parent_tidptr, target_ulong newtls,
4012 abi_ulong child_tidptr)
4013 {
4014 int ret;
4015 TaskState *ts;
4016 CPUArchState *new_env;
4017 #if defined(CONFIG_USE_NPTL)
4018 unsigned int nptl_flags;
4019 sigset_t sigmask;
4020 #else
4021 uint8_t *new_stack;
4022 #endif
4023
4024 /* Emulate vfork() with fork() */
4025 if (flags & CLONE_VFORK)
4026 flags &= ~(CLONE_VFORK | CLONE_VM);
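/* vfork() semantics (shared address space, parent suspended until the
   child execs) are hard to reproduce here, and callers normally exec
   immediately, so a plain fork() is assumed to be a close enough
   approximation. */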
4027
4028 if (flags & CLONE_VM) {
4029 TaskState *parent_ts = (TaskState *)env->opaque;
4030 #if defined(CONFIG_USE_NPTL)
4031 new_thread_info info;
4032 pthread_attr_t attr;
4033 #endif
4034 ts = g_malloc0(sizeof(TaskState));
4035 init_task_state(ts);
4036 /* we create a new CPU instance. */
4037 new_env = cpu_copy(env);
4038 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4039 cpu_state_reset(new_env);
4040 #endif
4041 /* Init regs that differ from the parent. */
4042 cpu_clone_regs(new_env, newsp);
4043 new_env->opaque = ts;
4044 ts->bprm = parent_ts->bprm;
4045 ts->info = parent_ts->info;
4046 #if defined(CONFIG_USE_NPTL)
4047 nptl_flags = flags;
4048 flags &= ~CLONE_NPTL_FLAGS2;
4049
4050 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4051 ts->child_tidptr = child_tidptr;
4052 }
4053
4054 if (nptl_flags & CLONE_SETTLS)
4055 cpu_set_tls (new_env, newtls);
4056
4057 /* Grab a mutex so that thread setup appears atomic. */
4058 pthread_mutex_lock(&clone_lock);
4059
4060 memset(&info, 0, sizeof(info));
4061 pthread_mutex_init(&info.mutex, NULL);
4062 pthread_mutex_lock(&info.mutex);
4063 pthread_cond_init(&info.cond, NULL);
4064 info.env = new_env;
4065 if (nptl_flags & CLONE_CHILD_SETTID)
4066 info.child_tidptr = child_tidptr;
4067 if (nptl_flags & CLONE_PARENT_SETTID)
4068 info.parent_tidptr = parent_tidptr;
4069
4070 ret = pthread_attr_init(&attr);
4071 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4072 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4073 /* It is not safe to deliver signals until the child has finished
4074 initializing, so temporarily block all signals. */
4075 sigfillset(&sigmask);
4076 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4077
4078 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4079 /* TODO: Free new CPU state if thread creation failed. */
4080
4081 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4082 pthread_attr_destroy(&attr);
4083 if (ret == 0) {
4084 /* Wait for the child to initialize. */
4085 pthread_cond_wait(&info.cond, &info.mutex);
4086 ret = info.tid;
4087 if (flags & CLONE_PARENT_SETTID)
4088 put_user_u32(ret, parent_tidptr);
4089 } else {
4090 ret = -1;
4091 }
4092 pthread_mutex_unlock(&info.mutex);
4093 pthread_cond_destroy(&info.cond);
4094 pthread_mutex_destroy(&info.mutex);
4095 pthread_mutex_unlock(&clone_lock);
4096 #else
4097 if (flags & CLONE_NPTL_FLAGS2)
4098 return -EINVAL;
4099 /* This is probably going to die very quickly, but do it anyway. */
4100 new_stack = g_malloc0 (NEW_STACK_SIZE);
4101 #ifdef __ia64__
4102 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4103 #else
4104 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4105 #endif
4106 #endif
4107 } else {
4108 /* if CLONE_VM is not set, we consider this a fork */
4109 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4110 return -EINVAL;
4111 fork_start();
4112 ret = fork();
4113 if (ret == 0) {
4114 /* Child Process. */
4115 cpu_clone_regs(env, newsp);
4116 fork_end(1);
4117 #if defined(CONFIG_USE_NPTL)
4118 /* There is a race condition here. The parent process could
4119 theoretically read the TID in the child process before the child
4120 tid is set. This would require using either ptrace
4121 (not implemented) or having *_tidptr to point at a shared memory
4122 mapping. We can't repeat the spinlock hack used above because
4123 the child process gets its own copy of the lock. */
4124 if (flags & CLONE_CHILD_SETTID)
4125 put_user_u32(gettid(), child_tidptr);
4126 if (flags & CLONE_PARENT_SETTID)
4127 put_user_u32(gettid(), parent_tidptr);
4128 ts = (TaskState *)env->opaque;
4129 if (flags & CLONE_SETTLS)
4130 cpu_set_tls (env, newtls);
4131 if (flags & CLONE_CHILD_CLEARTID)
4132 ts->child_tidptr = child_tidptr;
4133 #endif
4134 } else {
4135 fork_end(0);
4136 }
4137 }
4138 return ret;
4139 }
4140
4141 /* Warning: does not handle Linux-specific flags... */
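/* The TARGET_F_* command numbers are per-architecture constants and need
   not match the host's F_* values, so commands are translated before the
   host fcntl() is invoked; unknown commands map to -TARGET_EINVAL. */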
4142 static int target_to_host_fcntl_cmd(int cmd)
4143 {
4144 switch(cmd) {
4145 case TARGET_F_DUPFD:
4146 case TARGET_F_GETFD:
4147 case TARGET_F_SETFD:
4148 case TARGET_F_GETFL:
4149 case TARGET_F_SETFL:
4150 return cmd;
4151 case TARGET_F_GETLK:
4152 return F_GETLK;
4153 case TARGET_F_SETLK:
4154 return F_SETLK;
4155 case TARGET_F_SETLKW:
4156 return F_SETLKW;
4157 case TARGET_F_GETOWN:
4158 return F_GETOWN;
4159 case TARGET_F_SETOWN:
4160 return F_SETOWN;
4161 case TARGET_F_GETSIG:
4162 return F_GETSIG;
4163 case TARGET_F_SETSIG:
4164 return F_SETSIG;
4165 #if TARGET_ABI_BITS == 32
4166 case TARGET_F_GETLK64:
4167 return F_GETLK64;
4168 case TARGET_F_SETLK64:
4169 return F_SETLK64;
4170 case TARGET_F_SETLKW64:
4171 return F_SETLKW64;
4172 #endif
4173 case TARGET_F_SETLEASE:
4174 return F_SETLEASE;
4175 case TARGET_F_GETLEASE:
4176 return F_GETLEASE;
4177 #ifdef F_DUPFD_CLOEXEC
4178 case TARGET_F_DUPFD_CLOEXEC:
4179 return F_DUPFD_CLOEXEC;
4180 #endif
4181 case TARGET_F_NOTIFY:
4182 return F_NOTIFY;
4183 default:
4184 return -TARGET_EINVAL;
4185 }
4186 return -TARGET_EINVAL;
4187 }
4188
4189 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4190 {
4191 struct flock fl;
4192 struct target_flock *target_fl;
4193 struct flock64 fl64;
4194 struct target_flock64 *target_fl64;
4195 abi_long ret;
4196 int host_cmd = target_to_host_fcntl_cmd(cmd);
4197
4198 if (host_cmd == -TARGET_EINVAL)
4199 return host_cmd;
4200
4201 switch(cmd) {
4202 case TARGET_F_GETLK:
4203 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4204 return -TARGET_EFAULT;
4205 fl.l_type = tswap16(target_fl->l_type);
4206 fl.l_whence = tswap16(target_fl->l_whence);
4207 fl.l_start = tswapal(target_fl->l_start);
4208 fl.l_len = tswapal(target_fl->l_len);
4209 fl.l_pid = tswap32(target_fl->l_pid);
4210 unlock_user_struct(target_fl, arg, 0);
4211 ret = get_errno(fcntl(fd, host_cmd, &fl));
4212 if (ret == 0) {
4213 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4214 return -TARGET_EFAULT;
4215 target_fl->l_type = tswap16(fl.l_type);
4216 target_fl->l_whence = tswap16(fl.l_whence);
4217 target_fl->l_start = tswapal(fl.l_start);
4218 target_fl->l_len = tswapal(fl.l_len);
4219 target_fl->l_pid = tswap32(fl.l_pid);
4220 unlock_user_struct(target_fl, arg, 1);
4221 }
4222 break;
4223
4224 case TARGET_F_SETLK:
4225 case TARGET_F_SETLKW:
4226 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4227 return -TARGET_EFAULT;
4228 fl.l_type = tswap16(target_fl->l_type);
4229 fl.l_whence = tswap16(target_fl->l_whence);
4230 fl.l_start = tswapal(target_fl->l_start);
4231 fl.l_len = tswapal(target_fl->l_len);
4232 fl.l_pid = tswap32(target_fl->l_pid);
4233 unlock_user_struct(target_fl, arg, 0);
4234 ret = get_errno(fcntl(fd, host_cmd, &fl));
4235 break;
4236
4237 case TARGET_F_GETLK64:
4238 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4239 return -TARGET_EFAULT;
4240 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4241 fl64.l_whence = tswap16(target_fl64->l_whence);
4242 fl64.l_start = tswap64(target_fl64->l_start);
4243 fl64.l_len = tswap64(target_fl64->l_len);
4244 fl64.l_pid = tswap32(target_fl64->l_pid);
4245 unlock_user_struct(target_fl64, arg, 0);
4246 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4247 if (ret == 0) {
4248 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4249 return -TARGET_EFAULT;
4250 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4251 target_fl64->l_whence = tswap16(fl64.l_whence);
4252 target_fl64->l_start = tswap64(fl64.l_start);
4253 target_fl64->l_len = tswap64(fl64.l_len);
4254 target_fl64->l_pid = tswap32(fl64.l_pid);
4255 unlock_user_struct(target_fl64, arg, 1);
4256 }
4257 break;
4258 case TARGET_F_SETLK64:
4259 case TARGET_F_SETLKW64:
4260 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4261 return -TARGET_EFAULT;
4262 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4263 fl64.l_whence = tswap16(target_fl64->l_whence);
4264 fl64.l_start = tswap64(target_fl64->l_start);
4265 fl64.l_len = tswap64(target_fl64->l_len);
4266 fl64.l_pid = tswap32(target_fl64->l_pid);
4267 unlock_user_struct(target_fl64, arg, 0);
4268 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4269 break;
4270
4271 case TARGET_F_GETFL:
4272 ret = get_errno(fcntl(fd, host_cmd, arg));
4273 if (ret >= 0) {
4274 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4275 }
4276 break;
4277
4278 case TARGET_F_SETFL:
4279 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4280 break;
4281
4282 case TARGET_F_SETOWN:
4283 case TARGET_F_GETOWN:
4284 case TARGET_F_SETSIG:
4285 case TARGET_F_GETSIG:
4286 case TARGET_F_SETLEASE:
4287 case TARGET_F_GETLEASE:
4288 ret = get_errno(fcntl(fd, host_cmd, arg));
4289 break;
4290
4291 default:
4292 ret = get_errno(fcntl(fd, cmd, arg));
4293 break;
4294 }
4295 return ret;
4296 }
4297
4298 #ifdef USE_UID16
4299
4300 static inline int high2lowuid(int uid)
4301 {
4302 if (uid > 65535)
4303 return 65534;
4304 else
4305 return uid;
4306 }
4307
4308 static inline int high2lowgid(int gid)
4309 {
4310 if (gid > 65535)
4311 return 65534;
4312 else
4313 return gid;
4314 }
4315
4316 static inline int low2highuid(int uid)
4317 {
4318 if ((int16_t)uid == -1)
4319 return -1;
4320 else
4321 return uid;
4322 }
4323
4324 static inline int low2highgid(int gid)
4325 {
4326 if ((int16_t)gid == -1)
4327 return -1;
4328 else
4329 return gid;
4330 }
4331 static inline int tswapid(int id)
4332 {
4333 return tswap16(id);
4334 }
4335 #else /* !USE_UID16 */
4336 static inline int high2lowuid(int uid)
4337 {
4338 return uid;
4339 }
4340 static inline int high2lowgid(int gid)
4341 {
4342 return gid;
4343 }
4344 static inline int low2highuid(int uid)
4345 {
4346 return uid;
4347 }
4348 static inline int low2highgid(int gid)
4349 {
4350 return gid;
4351 }
4352 static inline int tswapid(int id)
4353 {
4354 return tswap32(id);
4355 }
4356 #endif /* USE_UID16 */
4357
4358 void syscall_init(void)
4359 {
4360 IOCTLEntry *ie;
4361 const argtype *arg_type;
4362 int size;
4363 int i;
4364
4365 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4366 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4367 #include "syscall_types.h"
4368 #undef STRUCT
4369 #undef STRUCT_SPECIAL
4370
4371 /* We patch the ioctl size if necessary, relying on the fact that
4372 no ioctl has all the bits set to '1' in the size field. */
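/* An ioctl number encodes direction, size, type and command; entries
   registered with an all-ones size field get the real size recomputed
   here from the thunk description of their pointer argument. */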
4373 ie = ioctl_entries;
4374 while (ie->target_cmd != 0) {
4375 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4376 TARGET_IOC_SIZEMASK) {
4377 arg_type = ie->arg_type;
4378 if (arg_type[0] != TYPE_PTR) {
4379 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4380 ie->target_cmd);
4381 exit(1);
4382 }
4383 arg_type++;
4384 size = thunk_type_size(arg_type, 0);
4385 ie->target_cmd = (ie->target_cmd &
4386 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4387 (size << TARGET_IOC_SIZESHIFT);
4388 }
4389
4390 /* Build target_to_host_errno_table[] table from
4391 * host_to_target_errno_table[]. */
4392 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4393 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4394
4395 /* automatic consistency check if same arch */
4396 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4397 (defined(__x86_64__) && defined(TARGET_X86_64))
4398 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4399 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4400 ie->name, ie->target_cmd, ie->host_cmd);
4401 }
4402 #endif
4403 ie++;
4404 }
4405 }
4406
4407 #if TARGET_ABI_BITS == 32
4408 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4409 {
4410 #ifdef TARGET_WORDS_BIGENDIAN
4411 return ((uint64_t)word0 << 32) | word1;
4412 #else
4413 return ((uint64_t)word1 << 32) | word0;
4414 #endif
4415 }
4416 #else /* TARGET_ABI_BITS != 32 */
4417 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4418 {
4419 return word0;
4420 }
4421 #endif /* TARGET_ABI_BITS == 32 */
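/* On 32-bit ABIs a 64-bit file offset arrives split across a pair of
   syscall argument registers whose order depends on target endianness;
   some ABIs additionally require the pair to start on an even register
   (see regpairs_aligned()), which is why the *truncate64 wrappers below
   may shift their arguments. */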
4422
4423 #ifdef TARGET_NR_truncate64
4424 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4425 abi_long arg2,
4426 abi_long arg3,
4427 abi_long arg4)
4428 {
4429 if (regpairs_aligned(cpu_env)) {
4430 arg2 = arg3;
4431 arg3 = arg4;
4432 }
4433 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4434 }
4435 #endif
4436
4437 #ifdef TARGET_NR_ftruncate64
4438 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4439 abi_long arg2,
4440 abi_long arg3,
4441 abi_long arg4)
4442 {
4443 if (regpairs_aligned(cpu_env)) {
4444 arg2 = arg3;
4445 arg3 = arg4;
4446 }
4447 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4448 }
4449 #endif
4450
4451 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4452 abi_ulong target_addr)
4453 {
4454 struct target_timespec *target_ts;
4455
4456 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4457 return -TARGET_EFAULT;
4458 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4459 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4460 unlock_user_struct(target_ts, target_addr, 0);
4461 return 0;
4462 }
4463
4464 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4465 struct timespec *host_ts)
4466 {
4467 struct target_timespec *target_ts;
4468
4469 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4470 return -TARGET_EFAULT;
4471 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4472 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4473 unlock_user_struct(target_ts, target_addr, 1);
4474 return 0;
4475 }
4476
4477 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4478 static inline abi_long host_to_target_stat64(void *cpu_env,
4479 abi_ulong target_addr,
4480 struct stat *host_st)
4481 {
4482 #ifdef TARGET_ARM
4483 if (((CPUARMState *)cpu_env)->eabi) {
4484 struct target_eabi_stat64 *target_st;
4485
4486 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4487 return -TARGET_EFAULT;
4488 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4489 __put_user(host_st->st_dev, &target_st->st_dev);
4490 __put_user(host_st->st_ino, &target_st->st_ino);
4491 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4492 __put_user(host_st->st_ino, &target_st->__st_ino);
4493 #endif
4494 __put_user(host_st->st_mode, &target_st->st_mode);
4495 __put_user(host_st->st_nlink, &target_st->st_nlink);
4496 __put_user(host_st->st_uid, &target_st->st_uid);
4497 __put_user(host_st->st_gid, &target_st->st_gid);
4498 __put_user(host_st->st_rdev, &target_st->st_rdev);
4499 __put_user(host_st->st_size, &target_st->st_size);
4500 __put_user(host_st->st_blksize, &target_st->st_blksize);
4501 __put_user(host_st->st_blocks, &target_st->st_blocks);
4502 __put_user(host_st->st_atime, &target_st->target_st_atime);
4503 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4504 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4505 unlock_user_struct(target_st, target_addr, 1);
4506 } else
4507 #endif
4508 {
4509 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4510 struct target_stat *target_st;
4511 #else
4512 struct target_stat64 *target_st;
4513 #endif
4514
4515 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4516 return -TARGET_EFAULT;
4517 memset(target_st, 0, sizeof(*target_st));
4518 __put_user(host_st->st_dev, &target_st->st_dev);
4519 __put_user(host_st->st_ino, &target_st->st_ino);
4520 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4521 __put_user(host_st->st_ino, &target_st->__st_ino);
4522 #endif
4523 __put_user(host_st->st_mode, &target_st->st_mode);
4524 __put_user(host_st->st_nlink, &target_st->st_nlink);
4525 __put_user(host_st->st_uid, &target_st->st_uid);
4526 __put_user(host_st->st_gid, &target_st->st_gid);
4527 __put_user(host_st->st_rdev, &target_st->st_rdev);
4528 /* XXX: better use of kernel struct */
4529 __put_user(host_st->st_size, &target_st->st_size);
4530 __put_user(host_st->st_blksize, &target_st->st_blksize);
4531 __put_user(host_st->st_blocks, &target_st->st_blocks);
4532 __put_user(host_st->st_atime, &target_st->target_st_atime);
4533 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4534 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4535 unlock_user_struct(target_st, target_addr, 1);
4536 }
4537
4538 return 0;
4539 }
4540 #endif
4541
4542 #if defined(CONFIG_USE_NPTL)
4543 /* ??? Using host futex calls even when target atomic operations
4544 are not really atomic probably breaks things. However, implementing
4545 futexes locally would make futexes shared between multiple processes
4546 tricky; they are probably useless anyway because guest atomic
4547 operations won't work either. */
4548 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4549 target_ulong uaddr2, int val3)
4550 {
4551 struct timespec ts, *pts;
4552 int base_op;
4553
4554 /* ??? We assume FUTEX_* constants are the same on both host
4555 and target. */
4556 #ifdef FUTEX_CMD_MASK
4557 base_op = op & FUTEX_CMD_MASK;
4558 #else
4559 base_op = op;
4560 #endif
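/* For FUTEX_WAIT the expected value is compared by the kernel against
   the guest-written word at uaddr, so it must be byte-swapped to match
   guest byte order; the timeout, in contrast, is converted into a host
   struct timespec. */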
4561 switch (base_op) {
4562 case FUTEX_WAIT:
4563 if (timeout) {
4564 pts = &ts;
4565 target_to_host_timespec(pts, timeout);
4566 } else {
4567 pts = NULL;
4568 }
4569 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4570 pts, NULL, 0));
4571 case FUTEX_WAKE:
4572 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4573 case FUTEX_FD:
4574 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4575 case FUTEX_REQUEUE:
4576 case FUTEX_CMP_REQUEUE:
4577 case FUTEX_WAKE_OP:
4578 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4579 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4580 But the prototype takes a `struct timespec *'; insert casts
4581 to satisfy the compiler. We do not need to tswap TIMEOUT
4582 since it's not compared to guest memory. */
4583 pts = (struct timespec *)(uintptr_t) timeout;
4584 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4585 g2h(uaddr2),
4586 (base_op == FUTEX_CMP_REQUEUE
4587 ? tswap32(val3)
4588 : val3)));
4589 default:
4590 return -TARGET_ENOSYS;
4591 }
4592 }
4593 #endif
4594
4595 /* Map host to target signal numbers for the wait family of syscalls.
4596 Assume all other status bits are the same. */
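/* Linux encodes a wait status with the termination signal in the low
   7 bits (0x7f marks a stopped child), the core-dump flag in bit 7 and,
   for stopped children, the stop signal in bits 8-15; only the signal
   fields need translating below. */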
4597 static int host_to_target_waitstatus(int status)
4598 {
4599 if (WIFSIGNALED(status)) {
4600 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4601 }
4602 if (WIFSTOPPED(status)) {
4603 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4604 | (status & 0xff);
4605 }
4606 return status;
4607 }
4608
4609 int get_osversion(void)
4610 {
4611 static int osversion;
4612 struct new_utsname buf;
4613 const char *s;
4614 int i, n, tmp;
4615 if (osversion)
4616 return osversion;
4617 if (qemu_uname_release && *qemu_uname_release) {
4618 s = qemu_uname_release;
4619 } else {
4620 if (sys_uname(&buf))
4621 return 0;
4622 s = buf.release;
4623 }
4624 tmp = 0;
4625 for (i = 0; i < 3; i++) {
4626 n = 0;
4627 while (*s >= '0' && *s <= '9') {
4628 n *= 10;
4629 n += *s - '0';
4630 s++;
4631 }
4632 tmp = (tmp << 8) + n;
4633 if (*s == '.')
4634 s++;
4635 }
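/* tmp now holds the release encoded KERNEL_VERSION()-style:
   (major << 16) | (minor << 8) | patch, e.g. "3.2.0" becomes 0x030200. */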
4636 osversion = tmp;
4637 return osversion;
4638 }
4639
4640
4641 static int open_self_maps(void *cpu_env, int fd)
4642 {
4643 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4644
4645 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4646 (unsigned long long)ts->info->stack_limit,
4647 (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1))
4648 & TARGET_PAGE_MASK,
4649 (unsigned long long)ts->stack_base);
4650
4651 return 0;
4652 }
4653
4654 static int open_self_stat(void *cpu_env, int fd)
4655 {
4656 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4657 abi_ulong start_stack = ts->info->start_stack;
4658 int i;
4659
4660 for (i = 0; i < 44; i++) {
4661 char buf[128];
4662 int len;
4663 uint64_t val = 0;
4664
4665 if (i == 27) {
4666 /* stack bottom */
4667 val = start_stack;
4668 }
4669 snprintf(buf, sizeof(buf), "%"PRId64 "%c", val, i == 43 ? '\n' : ' ');
4670 len = strlen(buf);
4671 if (write(fd, buf, len) != len) {
4672 return -1;
4673 }
4674 }
4675
4676 return 0;
4677 }
4678
4679 static int open_self_auxv(void *cpu_env, int fd)
4680 {
4681 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4682 abi_ulong auxv = ts->info->saved_auxv;
4683 abi_ulong len = ts->info->auxv_len;
4684 char *ptr;
4685
4686 /*
4687 * The auxiliary vector is stored on the target process stack;
4688 * read the whole auxv vector and copy it to the file.
4689 */
4690 ptr = lock_user(VERIFY_READ, auxv, len, 0);
4691 if (ptr != NULL) {
4692 while (len > 0) {
4693 ssize_t r;
4694 r = write(fd, ptr, len);
4695 if (r <= 0) {
4696 break;
4697 }
4698 len -= r;
4699 ptr += r;
4700 }
4701 lseek(fd, 0, SEEK_SET);
4702 unlock_user(ptr, auxv, len);
4703 }
4704
4705 return 0;
4706 }
4707
4708 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
4709 {
4710 struct fake_open {
4711 const char *filename;
4712 int (*fill)(void *cpu_env, int fd);
4713 };
4714 const struct fake_open *fake_open;
4715 static const struct fake_open fakes[] = {
4716 { "/proc/self/maps", open_self_maps },
4717 { "/proc/self/stat", open_self_stat },
4718 { "/proc/self/auxv", open_self_auxv },
4719 { NULL, NULL }
4720 };
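/* Opens of these /proc/self files would otherwise expose the host QEMU
   process rather than the emulated one, so their contents are
   synthesized into an unlinked temporary file whose descriptor is
   returned to the guest. */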
4721
4722 for (fake_open = fakes; fake_open->filename; fake_open++) {
4723 if (!strncmp(pathname, fake_open->filename,
4724 strlen(fake_open->filename))) {
4725 break;
4726 }
4727 }
4728
4729 if (fake_open->filename) {
4730 const char *tmpdir;
4731 char filename[PATH_MAX];
4732 int fd, r;
4733
4734 /* create a temporary file to hold the synthesized contents */
4735 tmpdir = getenv("TMPDIR");
4736 if (!tmpdir)
4737 tmpdir = "/tmp";
4738 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
4739 fd = mkstemp(filename);
4740 if (fd < 0) {
4741 return fd;
4742 }
4743 unlink(filename);
4744
4745 if ((r = fake_open->fill(cpu_env, fd))) {
4746 close(fd);
4747 return r;
4748 }
4749 lseek(fd, 0, SEEK_SET);
4750
4751 return fd;
4752 }
4753
4754 return get_errno(open(path(pathname), flags, mode));
4755 }
4756
4757 /* do_syscall() should always have a single exit point at the end so
4758 that actions, such as logging of syscall results, can be performed.
4759 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4760 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4761 abi_long arg2, abi_long arg3, abi_long arg4,
4762 abi_long arg5, abi_long arg6, abi_long arg7,
4763 abi_long arg8)
4764 {
4765 abi_long ret;
4766 struct stat st;
4767 struct statfs stfs;
4768 void *p;
4769
4770 #ifdef DEBUG
4771 gemu_log("syscall %d", num);
4772 #endif
4773 if(do_strace)
4774 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4775
4776 switch(num) {
4777 case TARGET_NR_exit:
4778 #ifdef CONFIG_USE_NPTL
4779 /* In old applications this may be used to implement _exit(2).
4780 However in threaded applications it is used for thread termination,
4781 and _exit_group is used for application termination.
4782 Do thread termination if we have more than one thread. */
4783 /* FIXME: This probably breaks if a signal arrives. We should probably
4784 be disabling signals. */
4785 if (first_cpu->next_cpu) {
4786 TaskState *ts;
4787 CPUArchState **lastp;
4788 CPUArchState *p;
4789
4790 cpu_list_lock();
4791 lastp = &first_cpu;
4792 p = first_cpu;
4793 while (p && p != (CPUArchState *)cpu_env) {
4794 lastp = &p->next_cpu;
4795 p = p->next_cpu;
4796 }
4797 /* If we didn't find the CPU for this thread then something is
4798 horribly wrong. */
4799 if (!p)
4800 abort();
4801 /* Remove the CPU from the list. */
4802 *lastp = p->next_cpu;
4803 cpu_list_unlock();
4804 ts = ((CPUArchState *)cpu_env)->opaque;
4805 if (ts->child_tidptr) {
4806 put_user_u32(0, ts->child_tidptr);
4807 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4808 NULL, NULL, 0);
4809 }
4810 thread_env = NULL;
4811 g_free(cpu_env);
4812 g_free(ts);
4813 pthread_exit(NULL);
4814 }
4815 #endif
4816 #ifdef TARGET_GPROF
4817 _mcleanup();
4818 #endif
4819 gdb_exit(cpu_env, arg1);
4820 _exit(arg1);
4821 ret = 0; /* avoid warning */
4822 break;
4823 case TARGET_NR_read:
4824 if (arg3 == 0)
4825 ret = 0;
4826 else {
4827 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4828 goto efault;
4829 ret = get_errno(read(arg1, p, arg3));
4830 unlock_user(p, arg2, ret);
4831 }
4832 break;
4833 case TARGET_NR_write:
4834 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4835 goto efault;
4836 ret = get_errno(write(arg1, p, arg3));
4837 unlock_user(p, arg2, 0);
4838 break;
4839 case TARGET_NR_open:
4840 if (!(p = lock_user_string(arg1)))
4841 goto efault;
4842 ret = get_errno(do_open(cpu_env, p,
4843 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4844 arg3));
4845 unlock_user(p, arg1, 0);
4846 break;
4847 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4848 case TARGET_NR_openat:
4849 if (!(p = lock_user_string(arg2)))
4850 goto efault;
4851 ret = get_errno(sys_openat(arg1,
4852 path(p),
4853 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4854 arg4));
4855 unlock_user(p, arg2, 0);
4856 break;
4857 #endif
4858 case TARGET_NR_close:
4859 ret = get_errno(close(arg1));
4860 break;
4861 case TARGET_NR_brk:
4862 ret = do_brk(arg1);
4863 break;
4864 case TARGET_NR_fork:
4865 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4866 break;
4867 #ifdef TARGET_NR_waitpid
4868 case TARGET_NR_waitpid:
4869 {
4870 int status;
4871 ret = get_errno(waitpid(arg1, &status, arg3));
4872 if (!is_error(ret) && arg2 && ret
4873 && put_user_s32(host_to_target_waitstatus(status), arg2))
4874 goto efault;
4875 }
4876 break;
4877 #endif
4878 #ifdef TARGET_NR_waitid
4879 case TARGET_NR_waitid:
4880 {
4881 siginfo_t info;
4882 info.si_pid = 0;
4883 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4884 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4885 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4886 goto efault;
4887 host_to_target_siginfo(p, &info);
4888 unlock_user(p, arg3, sizeof(target_siginfo_t));
4889 }
4890 }
4891 break;
4892 #endif
4893 #ifdef TARGET_NR_creat /* not on alpha */
4894 case TARGET_NR_creat:
4895 if (!(p = lock_user_string(arg1)))
4896 goto efault;
4897 ret = get_errno(creat(p, arg2));
4898 unlock_user(p, arg1, 0);
4899 break;
4900 #endif
4901 case TARGET_NR_link:
4902 {
4903 void * p2;
4904 p = lock_user_string(arg1);
4905 p2 = lock_user_string(arg2);
4906 if (!p || !p2)
4907 ret = -TARGET_EFAULT;
4908 else
4909 ret = get_errno(link(p, p2));
4910 unlock_user(p2, arg2, 0);
4911 unlock_user(p, arg1, 0);
4912 }
4913 break;
4914 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4915 case TARGET_NR_linkat:
4916 {
4917 void * p2 = NULL;
4918 if (!arg2 || !arg4)
4919 goto efault;
4920 p = lock_user_string(arg2);
4921 p2 = lock_user_string(arg4);
4922 if (!p || !p2)
4923 ret = -TARGET_EFAULT;
4924 else
4925 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4926 unlock_user(p, arg2, 0);
4927 unlock_user(p2, arg4, 0);
4928 }
4929 break;
4930 #endif
4931 case TARGET_NR_unlink:
4932 if (!(p = lock_user_string(arg1)))
4933 goto efault;
4934 ret = get_errno(unlink(p));
4935 unlock_user(p, arg1, 0);
4936 break;
4937 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4938 case TARGET_NR_unlinkat:
4939 if (!(p = lock_user_string(arg2)))
4940 goto efault;
4941 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4942 unlock_user(p, arg2, 0);
4943 break;
4944 #endif
4945 case TARGET_NR_execve:
4946 {
4947 char **argp, **envp;
4948 int argc, envc;
4949 abi_ulong gp;
4950 abi_ulong guest_argp;
4951 abi_ulong guest_envp;
4952 abi_ulong addr;
4953 char **q;
4954 int total_size = 0;
4955
4956 argc = 0;
4957 guest_argp = arg2;
4958 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4959 if (get_user_ual(addr, gp))
4960 goto efault;
4961 if (!addr)
4962 break;
4963 argc++;
4964 }
4965 envc = 0;
4966 guest_envp = arg3;
4967 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4968 if (get_user_ual(addr, gp))
4969 goto efault;
4970 if (!addr)
4971 break;
4972 envc++;
4973 }
4974
4975 argp = alloca((argc + 1) * sizeof(void *));
4976 envp = alloca((envc + 1) * sizeof(void *));
4977
4978 for (gp = guest_argp, q = argp; gp;
4979 gp += sizeof(abi_ulong), q++) {
4980 if (get_user_ual(addr, gp))
4981 goto execve_efault;
4982 if (!addr)
4983 break;
4984 if (!(*q = lock_user_string(addr)))
4985 goto execve_efault;
4986 total_size += strlen(*q) + 1;
4987 }
4988 *q = NULL;
4989
4990 for (gp = guest_envp, q = envp; gp;
4991 gp += sizeof(abi_ulong), q++) {
4992 if (get_user_ual(addr, gp))
4993 goto execve_efault;
4994 if (!addr)
4995 break;
4996 if (!(*q = lock_user_string(addr)))
4997 goto execve_efault;
4998 total_size += strlen(*q) + 1;
4999 }
5000 *q = NULL;
5001
5002 /* This case will not be caught by the host's execve() if its
5003 page size is bigger than the target's. */
5004 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5005 ret = -TARGET_E2BIG;
5006 goto execve_end;
5007 }
5008 if (!(p = lock_user_string(arg1)))
5009 goto execve_efault;
5010 ret = get_errno(execve(p, argp, envp));
5011 unlock_user(p, arg1, 0);
5012
5013 goto execve_end;
5014
5015 execve_efault:
5016 ret = -TARGET_EFAULT;
5017
5018 execve_end:
5019 for (gp = guest_argp, q = argp; *q;
5020 gp += sizeof(abi_ulong), q++) {
5021 if (get_user_ual(addr, gp)
5022 || !addr)
5023 break;
5024 unlock_user(*q, addr, 0);
5025 }
5026 for (gp = guest_envp, q = envp; *q;
5027 gp += sizeof(abi_ulong), q++) {
5028 if (get_user_ual(addr, gp)
5029 || !addr)
5030 break;
5031 unlock_user(*q, addr, 0);
5032 }
5033 }
5034 break;
5035 case TARGET_NR_chdir:
5036 if (!(p = lock_user_string(arg1)))
5037 goto efault;
5038 ret = get_errno(chdir(p));
5039 unlock_user(p, arg1, 0);
5040 break;
5041 #ifdef TARGET_NR_time
5042 case TARGET_NR_time:
5043 {
5044 time_t host_time;
5045 ret = get_errno(time(&host_time));
5046 if (!is_error(ret)
5047 && arg1
5048 && put_user_sal(host_time, arg1))
5049 goto efault;
5050 }
5051 break;
5052 #endif
5053 case TARGET_NR_mknod:
5054 if (!(p = lock_user_string(arg1)))
5055 goto efault;
5056 ret = get_errno(mknod(p, arg2, arg3));
5057 unlock_user(p, arg1, 0);
5058 break;
5059 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5060 case TARGET_NR_mknodat:
5061 if (!(p = lock_user_string(arg2)))
5062 goto efault;
5063 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5064 unlock_user(p, arg2, 0);
5065 break;
5066 #endif
5067 case TARGET_NR_chmod:
5068 if (!(p = lock_user_string(arg1)))
5069 goto efault;
5070 ret = get_errno(chmod(p, arg2));
5071 unlock_user(p, arg1, 0);
5072 break;
5073 #ifdef TARGET_NR_break
5074 case TARGET_NR_break:
5075 goto unimplemented;
5076 #endif
5077 #ifdef TARGET_NR_oldstat
5078 case TARGET_NR_oldstat:
5079 goto unimplemented;
5080 #endif
5081 case TARGET_NR_lseek:
5082 ret = get_errno(lseek(arg1, arg2, arg3));
5083 break;
5084 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5085 /* Alpha specific */
5086 case TARGET_NR_getxpid:
5087 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5088 ret = get_errno(getpid());
5089 break;
5090 #endif
5091 #ifdef TARGET_NR_getpid
5092 case TARGET_NR_getpid:
5093 ret = get_errno(getpid());
5094 break;
5095 #endif
5096 case TARGET_NR_mount:
5097 {
5098 /* need to look at the data field */
5099 void *p2, *p3;
5100 p = lock_user_string(arg1);
5101 p2 = lock_user_string(arg2);
5102 p3 = lock_user_string(arg3);
5103 if (!p || !p2 || !p3)
5104 ret = -TARGET_EFAULT;
5105 else {
5106 /* FIXME - arg5 should be locked, but it isn't clear how to
5107 * do that since it's not guaranteed to be a NULL-terminated
5108 * string.
5109 */
5110 if ( ! arg5 )
5111 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5112 else
5113 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5114 }
5115 unlock_user(p, arg1, 0);
5116 unlock_user(p2, arg2, 0);
5117 unlock_user(p3, arg3, 0);
5118 break;
5119 }
5120 #ifdef TARGET_NR_umount
5121 case TARGET_NR_umount:
5122 if (!(p = lock_user_string(arg1)))
5123 goto efault;
5124 ret = get_errno(umount(p));
5125 unlock_user(p, arg1, 0);
5126 break;
5127 #endif
5128 #ifdef TARGET_NR_stime /* not on alpha */
5129 case TARGET_NR_stime:
5130 {
5131 time_t host_time;
5132 if (get_user_sal(host_time, arg1))
5133 goto efault;
5134 ret = get_errno(stime(&host_time));
5135 }
5136 break;
5137 #endif
5138 case TARGET_NR_ptrace:
5139 goto unimplemented;
5140 #ifdef TARGET_NR_alarm /* not on alpha */
5141 case TARGET_NR_alarm:
5142 ret = alarm(arg1);
5143 break;
5144 #endif
5145 #ifdef TARGET_NR_oldfstat
5146 case TARGET_NR_oldfstat:
5147 goto unimplemented;
5148 #endif
5149 #ifdef TARGET_NR_pause /* not on alpha */
5150 case TARGET_NR_pause:
5151 ret = get_errno(pause());
5152 break;
5153 #endif
5154 #ifdef TARGET_NR_utime
5155 case TARGET_NR_utime:
5156 {
5157 struct utimbuf tbuf, *host_tbuf;
5158 struct target_utimbuf *target_tbuf;
5159 if (arg2) {
5160 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5161 goto efault;
5162 tbuf.actime = tswapal(target_tbuf->actime);
5163 tbuf.modtime = tswapal(target_tbuf->modtime);
5164 unlock_user_struct(target_tbuf, arg2, 0);
5165 host_tbuf = &tbuf;
5166 } else {
5167 host_tbuf = NULL;
5168 }
5169 if (!(p = lock_user_string(arg1)))
5170 goto efault;
5171 ret = get_errno(utime(p, host_tbuf));
5172 unlock_user(p, arg1, 0);
5173 }
5174 break;
5175 #endif
5176 case TARGET_NR_utimes:
5177 {
5178 struct timeval *tvp, tv[2];
5179 if (arg2) {
5180 if (copy_from_user_timeval(&tv[0], arg2)
5181 || copy_from_user_timeval(&tv[1],
5182 arg2 + sizeof(struct target_timeval)))
5183 goto efault;
5184 tvp = tv;
5185 } else {
5186 tvp = NULL;
5187 }
5188 if (!(p = lock_user_string(arg1)))
5189 goto efault;
5190 ret = get_errno(utimes(p, tvp));
5191 unlock_user(p, arg1, 0);
5192 }
5193 break;
5194 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5195 case TARGET_NR_futimesat:
5196 {
5197 struct timeval *tvp, tv[2];
5198 if (arg3) {
5199 if (copy_from_user_timeval(&tv[0], arg3)
5200 || copy_from_user_timeval(&tv[1],
5201 arg3 + sizeof(struct target_timeval)))
5202 goto efault;
5203 tvp = tv;
5204 } else {
5205 tvp = NULL;
5206 }
5207 if (!(p = lock_user_string(arg2)))
5208 goto efault;
5209 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5210 unlock_user(p, arg2, 0);
5211 }
5212 break;
5213 #endif
5214 #ifdef TARGET_NR_stty
5215 case TARGET_NR_stty:
5216 goto unimplemented;
5217 #endif
5218 #ifdef TARGET_NR_gtty
5219 case TARGET_NR_gtty:
5220 goto unimplemented;
5221 #endif
5222 case TARGET_NR_access:
5223 if (!(p = lock_user_string(arg1)))
5224 goto efault;
5225 ret = get_errno(access(path(p), arg2));
5226 unlock_user(p, arg1, 0);
5227 break;
5228 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5229 case TARGET_NR_faccessat:
5230 if (!(p = lock_user_string(arg2)))
5231 goto efault;
5232 ret = get_errno(sys_faccessat(arg1, p, arg3));
5233 unlock_user(p, arg2, 0);
5234 break;
5235 #endif
5236 #ifdef TARGET_NR_nice /* not on alpha */
5237 case TARGET_NR_nice:
5238 ret = get_errno(nice(arg1));
5239 break;
5240 #endif
5241 #ifdef TARGET_NR_ftime
5242 case TARGET_NR_ftime:
5243 goto unimplemented;
5244 #endif
5245 case TARGET_NR_sync:
5246 sync();
5247 ret = 0;
5248 break;
5249 case TARGET_NR_kill:
5250 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5251 break;
5252 case TARGET_NR_rename:
5253 {
5254 void *p2;
5255 p = lock_user_string(arg1);
5256 p2 = lock_user_string(arg2);
5257 if (!p || !p2)
5258 ret = -TARGET_EFAULT;
5259 else
5260 ret = get_errno(rename(p, p2));
5261 unlock_user(p2, arg2, 0);
5262 unlock_user(p, arg1, 0);
5263 }
5264 break;
5265 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5266 case TARGET_NR_renameat:
5267 {
5268 void *p2;
5269 p = lock_user_string(arg2);
5270 p2 = lock_user_string(arg4);
5271 if (!p || !p2)
5272 ret = -TARGET_EFAULT;
5273 else
5274 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5275 unlock_user(p2, arg4, 0);
5276 unlock_user(p, arg2, 0);
5277 }
5278 break;
5279 #endif
5280 case TARGET_NR_mkdir:
5281 if (!(p = lock_user_string(arg1)))
5282 goto efault;
5283 ret = get_errno(mkdir(p, arg2));
5284 unlock_user(p, arg1, 0);
5285 break;
5286 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5287 case TARGET_NR_mkdirat:
5288 if (!(p = lock_user_string(arg2)))
5289 goto efault;
5290 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5291 unlock_user(p, arg2, 0);
5292 break;
5293 #endif
5294 case TARGET_NR_rmdir:
5295 if (!(p = lock_user_string(arg1)))
5296 goto efault;
5297 ret = get_errno(rmdir(p));
5298 unlock_user(p, arg1, 0);
5299 break;
5300 case TARGET_NR_dup:
5301 ret = get_errno(dup(arg1));
5302 break;
5303 case TARGET_NR_pipe:
5304 ret = do_pipe(cpu_env, arg1, 0, 0);
5305 break;
5306 #ifdef TARGET_NR_pipe2
5307 case TARGET_NR_pipe2:
5308 ret = do_pipe(cpu_env, arg1, arg2, 1);
5309 break;
5310 #endif
5311 case TARGET_NR_times:
5312 {
5313 struct target_tms *tmsp;
5314 struct tms tms;
5315 ret = get_errno(times(&tms));
5316 if (arg1) {
5317 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5318 if (!tmsp)
5319 goto efault;
5320 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5321 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5322 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5323 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5324 }
5325 if (!is_error(ret))
5326 ret = host_to_target_clock_t(ret);
5327 }
5328 break;
5329 #ifdef TARGET_NR_prof
5330 case TARGET_NR_prof:
5331 goto unimplemented;
5332 #endif
5333 #ifdef TARGET_NR_signal
5334 case TARGET_NR_signal:
5335 goto unimplemented;
5336 #endif
5337 case TARGET_NR_acct:
5338 if (arg1 == 0) {
5339 ret = get_errno(acct(NULL));
5340 } else {
5341 if (!(p = lock_user_string(arg1)))
5342 goto efault;
5343 ret = get_errno(acct(path(p)));
5344 unlock_user(p, arg1, 0);
5345 }
5346 break;
5347 #ifdef TARGET_NR_umount2 /* not on alpha */
5348 case TARGET_NR_umount2:
5349 if (!(p = lock_user_string(arg1)))
5350 goto efault;
5351 ret = get_errno(umount2(p, arg2));
5352 unlock_user(p, arg1, 0);
5353 break;
5354 #endif
5355 #ifdef TARGET_NR_lock
5356 case TARGET_NR_lock:
5357 goto unimplemented;
5358 #endif
5359 case TARGET_NR_ioctl:
5360 ret = do_ioctl(arg1, arg2, arg3);
5361 break;
5362 case TARGET_NR_fcntl:
5363 ret = do_fcntl(arg1, arg2, arg3);
5364 break;
5365 #ifdef TARGET_NR_mpx
5366 case TARGET_NR_mpx:
5367 goto unimplemented;
5368 #endif
5369 case TARGET_NR_setpgid:
5370 ret = get_errno(setpgid(arg1, arg2));
5371 break;
5372 #ifdef TARGET_NR_ulimit
5373 case TARGET_NR_ulimit:
5374 goto unimplemented;
5375 #endif
5376 #ifdef TARGET_NR_oldolduname
5377 case TARGET_NR_oldolduname:
5378 goto unimplemented;
5379 #endif
5380 case TARGET_NR_umask:
5381 ret = get_errno(umask(arg1));
5382 break;
5383 case TARGET_NR_chroot:
5384 if (!(p = lock_user_string(arg1)))
5385 goto efault;
5386 ret = get_errno(chroot(p));
5387 unlock_user(p, arg1, 0);
5388 break;
5389 case TARGET_NR_ustat:
5390 goto unimplemented;
5391 case TARGET_NR_dup2:
5392 ret = get_errno(dup2(arg1, arg2));
5393 break;
5394 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5395 case TARGET_NR_dup3:
5396 ret = get_errno(dup3(arg1, arg2, arg3));
5397 break;
5398 #endif
5399 #ifdef TARGET_NR_getppid /* not on alpha */
5400 case TARGET_NR_getppid:
5401 ret = get_errno(getppid());
5402 break;
5403 #endif
5404 case TARGET_NR_getpgrp:
5405 ret = get_errno(getpgrp());
5406 break;
5407 case TARGET_NR_setsid:
5408 ret = get_errno(setsid());
5409 break;
5410 #ifdef TARGET_NR_sigaction
5411 case TARGET_NR_sigaction:
5412 {
5413 #if defined(TARGET_ALPHA)
5414 struct target_sigaction act, oact, *pact = 0;
5415 struct target_old_sigaction *old_act;
5416 if (arg2) {
5417 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5418 goto efault;
5419 act._sa_handler = old_act->_sa_handler;
5420 target_siginitset(&act.sa_mask, old_act->sa_mask);
5421 act.sa_flags = old_act->sa_flags;
5422 act.sa_restorer = 0;
5423 unlock_user_struct(old_act, arg2, 0);
5424 pact = &act;
5425 }
5426 ret = get_errno(do_sigaction(arg1, pact, &oact));
5427 if (!is_error(ret) && arg3) {
5428 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5429 goto efault;
5430 old_act->_sa_handler = oact._sa_handler;
5431 old_act->sa_mask = oact.sa_mask.sig[0];
5432 old_act->sa_flags = oact.sa_flags;
5433 unlock_user_struct(old_act, arg3, 1);
5434 }
5435 #elif defined(TARGET_MIPS)
5436 struct target_sigaction act, oact, *pact, *old_act;
5437
5438 if (arg2) {
5439 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5440 goto efault;
5441 act._sa_handler = old_act->_sa_handler;
5442 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5443 act.sa_flags = old_act->sa_flags;
5444 unlock_user_struct(old_act, arg2, 0);
5445 pact = &act;
5446 } else {
5447 pact = NULL;
5448 }
5449
5450 ret = get_errno(do_sigaction(arg1, pact, &oact));
5451
5452 if (!is_error(ret) && arg3) {
5453 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5454 goto efault;
5455 old_act->_sa_handler = oact._sa_handler;
5456 old_act->sa_flags = oact.sa_flags;
5457 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5458 old_act->sa_mask.sig[1] = 0;
5459 old_act->sa_mask.sig[2] = 0;
5460 old_act->sa_mask.sig[3] = 0;
5461 unlock_user_struct(old_act, arg3, 1);
5462 }
5463 #else
5464 struct target_old_sigaction *old_act;
5465 struct target_sigaction act, oact, *pact;
5466 if (arg2) {
5467 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5468 goto efault;
5469 act._sa_handler = old_act->_sa_handler;
5470 target_siginitset(&act.sa_mask, old_act->sa_mask);
5471 act.sa_flags = old_act->sa_flags;
5472 act.sa_restorer = old_act->sa_restorer;
5473 unlock_user_struct(old_act, arg2, 0);
5474 pact = &act;
5475 } else {
5476 pact = NULL;
5477 }
5478 ret = get_errno(do_sigaction(arg1, pact, &oact));
5479 if (!is_error(ret) && arg3) {
5480 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5481 goto efault;
5482 old_act->_sa_handler = oact._sa_handler;
5483 old_act->sa_mask = oact.sa_mask.sig[0];
5484 old_act->sa_flags = oact.sa_flags;
5485 old_act->sa_restorer = oact.sa_restorer;
5486 unlock_user_struct(old_act, arg3, 1);
5487 }
5488 #endif
5489 }
5490 break;
5491 #endif
5492 case TARGET_NR_rt_sigaction:
5493 {
5494 #if defined(TARGET_ALPHA)
5495 struct target_sigaction act, oact, *pact = 0;
5496 struct target_rt_sigaction *rt_act;
5497 /* ??? arg4 == sizeof(sigset_t). */
5498 if (arg2) {
5499 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5500 goto efault;
5501 act._sa_handler = rt_act->_sa_handler;
5502 act.sa_mask = rt_act->sa_mask;
5503 act.sa_flags = rt_act->sa_flags;
5504 act.sa_restorer = arg5;
5505 unlock_user_struct(rt_act, arg2, 0);
5506 pact = &act;
5507 }
5508 ret = get_errno(do_sigaction(arg1, pact, &oact));
5509 if (!is_error(ret) && arg3) {
5510 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5511 goto efault;
5512 rt_act->_sa_handler = oact._sa_handler;
5513 rt_act->sa_mask = oact.sa_mask;
5514 rt_act->sa_flags = oact.sa_flags;
5515 unlock_user_struct(rt_act, arg3, 1);
5516 }
5517 #else
5518 struct target_sigaction *act;
5519 struct target_sigaction *oact;
5520
5521 if (arg2) {
5522 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5523 goto efault;
5524 } else
5525 act = NULL;
5526 if (arg3) {
5527 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5528 ret = -TARGET_EFAULT;
5529 goto rt_sigaction_fail;
5530 }
5531 } else
5532 oact = NULL;
5533 ret = get_errno(do_sigaction(arg1, act, oact));
5534 rt_sigaction_fail:
5535 if (act)
5536 unlock_user_struct(act, arg2, 0);
5537 if (oact)
5538 unlock_user_struct(oact, arg3, 1);
5539 #endif
5540 }
5541 break;
5542 #ifdef TARGET_NR_sgetmask /* not on alpha */
5543 case TARGET_NR_sgetmask:
5544 {
5545 sigset_t cur_set;
5546 abi_ulong target_set;
5547 sigprocmask(0, NULL, &cur_set);
5548 host_to_target_old_sigset(&target_set, &cur_set);
5549 ret = target_set;
5550 }
5551 break;
5552 #endif
5553 #ifdef TARGET_NR_ssetmask /* not on alpha */
5554 case TARGET_NR_ssetmask:
5555 {
5556 sigset_t set, oset, cur_set;
5557 abi_ulong target_set = arg1;
5558 sigprocmask(0, NULL, &cur_set);
5559 target_to_host_old_sigset(&set, &target_set);
5560 sigorset(&set, &set, &cur_set);
5561 sigprocmask(SIG_SETMASK, &set, &oset);
5562 host_to_target_old_sigset(&target_set, &oset);
5563 ret = target_set;
5564 }
5565 break;
5566 #endif
5567 #ifdef TARGET_NR_sigprocmask
5568 case TARGET_NR_sigprocmask:
5569 {
5570 #if defined(TARGET_ALPHA)
5571 sigset_t set, oldset;
5572 abi_ulong mask;
5573 int how;
5574
5575 switch (arg1) {
5576 case TARGET_SIG_BLOCK:
5577 how = SIG_BLOCK;
5578 break;
5579 case TARGET_SIG_UNBLOCK:
5580 how = SIG_UNBLOCK;
5581 break;
5582 case TARGET_SIG_SETMASK:
5583 how = SIG_SETMASK;
5584 break;
5585 default:
5586 ret = -TARGET_EINVAL;
5587 goto fail;
5588 }
5589 mask = arg2;
5590 target_to_host_old_sigset(&set, &mask);
5591
5592 ret = get_errno(sigprocmask(how, &set, &oldset));
5593
5594 if (!is_error(ret)) {
5595 host_to_target_old_sigset(&mask, &oldset);
5596 ret = mask;
5597 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5598 }
5599 #else
5600 sigset_t set, oldset, *set_ptr;
5601 int how;
5602
5603 if (arg2) {
5604 switch (arg1) {
5605 case TARGET_SIG_BLOCK:
5606 how = SIG_BLOCK;
5607 break;
5608 case TARGET_SIG_UNBLOCK:
5609 how = SIG_UNBLOCK;
5610 break;
5611 case TARGET_SIG_SETMASK:
5612 how = SIG_SETMASK;
5613 break;
5614 default:
5615 ret = -TARGET_EINVAL;
5616 goto fail;
5617 }
5618 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5619 goto efault;
5620 target_to_host_old_sigset(&set, p);
5621 unlock_user(p, arg2, 0);
5622 set_ptr = &set;
5623 } else {
5624 how = 0;
5625 set_ptr = NULL;
5626 }
5627 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5628 if (!is_error(ret) && arg3) {
5629 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5630 goto efault;
5631 host_to_target_old_sigset(p, &oldset);
5632 unlock_user(p, arg3, sizeof(target_sigset_t));
5633 }
5634 #endif
5635 }
5636 break;
5637 #endif
5638 case TARGET_NR_rt_sigprocmask:
5639 {
5640 int how = arg1;
5641 sigset_t set, oldset, *set_ptr;
5642
5643 if (arg2) {
5644 switch(how) {
5645 case TARGET_SIG_BLOCK:
5646 how = SIG_BLOCK;
5647 break;
5648 case TARGET_SIG_UNBLOCK:
5649 how = SIG_UNBLOCK;
5650 break;
5651 case TARGET_SIG_SETMASK:
5652 how = SIG_SETMASK;
5653 break;
5654 default:
5655 ret = -TARGET_EINVAL;
5656 goto fail;
5657 }
5658 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5659 goto efault;
5660 target_to_host_sigset(&set, p);
5661 unlock_user(p, arg2, 0);
5662 set_ptr = &set;
5663 } else {
5664 how = 0;
5665 set_ptr = NULL;
5666 }
5667 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5668 if (!is_error(ret) && arg3) {
5669 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5670 goto efault;
5671 host_to_target_sigset(p, &oldset);
5672 unlock_user(p, arg3, sizeof(target_sigset_t));
5673 }
5674 }
5675 break;
5676 #ifdef TARGET_NR_sigpending
5677 case TARGET_NR_sigpending:
5678 {
5679 sigset_t set;
5680 ret = get_errno(sigpending(&set));
5681 if (!is_error(ret)) {
5682 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5683 goto efault;
5684 host_to_target_old_sigset(p, &set);
5685 unlock_user(p, arg1, sizeof(target_sigset_t));
5686 }
5687 }
5688 break;
5689 #endif
5690 case TARGET_NR_rt_sigpending:
5691 {
5692 sigset_t set;
5693 ret = get_errno(sigpending(&set));
5694 if (!is_error(ret)) {
5695 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5696 goto efault;
5697 host_to_target_sigset(p, &set);
5698 unlock_user(p, arg1, sizeof(target_sigset_t));
5699 }
5700 }
5701 break;
5702 #ifdef TARGET_NR_sigsuspend
5703 case TARGET_NR_sigsuspend:
5704 {
5705 sigset_t set;
5706 #if defined(TARGET_ALPHA)
5707 abi_ulong mask = arg1;
5708 target_to_host_old_sigset(&set, &mask);
5709 #else
5710 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5711 goto efault;
5712 target_to_host_old_sigset(&set, p);
5713 unlock_user(p, arg1, 0);
5714 #endif
5715 ret = get_errno(sigsuspend(&set));
5716 }
5717 break;
5718 #endif
5719 case TARGET_NR_rt_sigsuspend:
5720 {
5721 sigset_t set;
5722 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5723 goto efault;
5724 target_to_host_sigset(&set, p);
5725 unlock_user(p, arg1, 0);
5726 ret = get_errno(sigsuspend(&set));
5727 }
5728 break;
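/* rt_sigtimedwait: the timeout (arg3) is optional and passed to the host as
   NULL when absent; the siginfo is copied back to arg2 only on success and
   only if arg2 is non-zero. */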
5729 case TARGET_NR_rt_sigtimedwait:
5730 {
5731 sigset_t set;
5732 struct timespec uts, *puts;
5733 siginfo_t uinfo;
5734
5735 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5736 goto efault;
5737 target_to_host_sigset(&set, p);
5738 unlock_user(p, arg1, 0);
5739 if (arg3) {
5740 puts = &uts;
5741 target_to_host_timespec(puts, arg3);
5742 } else {
5743 puts = NULL;
5744 }
5745 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5746 if (!is_error(ret) && arg2) {
5747 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5748 goto efault;
5749 host_to_target_siginfo(p, &uinfo);
5750 unlock_user(p, arg2, sizeof(target_siginfo_t));
5751 }
5752 }
5753 break;
5754 case TARGET_NR_rt_sigqueueinfo:
5755 {
5756 siginfo_t uinfo;
5757 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
5758 goto efault;
5759 target_to_host_siginfo(&uinfo, p);
5760 unlock_user(p, arg3, 0);
5761 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5762 }
5763 break;
5764 #ifdef TARGET_NR_sigreturn
5765 case TARGET_NR_sigreturn:
5766 /* NOTE: ret is eax, so no transcoding needs to be done */
5767 ret = do_sigreturn(cpu_env);
5768 break;
5769 #endif
5770 case TARGET_NR_rt_sigreturn:
5771 /* NOTE: ret is eax, so no transcoding needs to be done */
5772 ret = do_rt_sigreturn(cpu_env);
5773 break;
5774 case TARGET_NR_sethostname:
5775 if (!(p = lock_user_string(arg1)))
5776 goto efault;
5777 ret = get_errno(sethostname(p, arg2));
5778 unlock_user(p, arg1, 0);
5779 break;
5780 case TARGET_NR_setrlimit:
5781 {
5782 int resource = target_to_host_resource(arg1);
5783 struct target_rlimit *target_rlim;
5784 struct rlimit rlim;
5785 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5786 goto efault;
5787 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5788 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5789 unlock_user_struct(target_rlim, arg2, 0);
5790 ret = get_errno(setrlimit(resource, &rlim));
5791 }
5792 break;
5793 case TARGET_NR_getrlimit:
5794 {
5795 int resource = target_to_host_resource(arg1);
5796 struct target_rlimit *target_rlim;
5797 struct rlimit rlim;
5798
5799 ret = get_errno(getrlimit(resource, &rlim));
5800 if (!is_error(ret)) {
5801 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5802 goto efault;
5803 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5804 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5805 unlock_user_struct(target_rlim, arg2, 1);
5806 }
5807 }
5808 break;
5809 case TARGET_NR_getrusage:
5810 {
5811 struct rusage rusage;
5812 ret = get_errno(getrusage(arg1, &rusage));
5813 if (!is_error(ret)) {
5814 host_to_target_rusage(arg2, &rusage);
5815 }
5816 }
5817 break;
5818 case TARGET_NR_gettimeofday:
5819 {
5820 struct timeval tv;
5821 ret = get_errno(gettimeofday(&tv, NULL));
5822 if (!is_error(ret)) {
5823 if (copy_to_user_timeval(arg1, &tv))
5824 goto efault;
5825 }
5826 }
5827 break;
5828 case TARGET_NR_settimeofday:
5829 {
5830 struct timeval tv;
5831 if (copy_from_user_timeval(&tv, arg1))
5832 goto efault;
5833 ret = get_errno(settimeofday(&tv, NULL));
5834 }
5835 break;
5836 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5837 case TARGET_NR_select:
5838 {
5839 struct target_sel_arg_struct *sel;
5840 abi_ulong inp, outp, exp, tvp;
5841 long nsel;
5842
5843 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5844 goto efault;
5845 nsel = tswapal(sel->n);
5846 inp = tswapal(sel->inp);
5847 outp = tswapal(sel->outp);
5848 exp = tswapal(sel->exp);
5849 tvp = tswapal(sel->tvp);
5850 unlock_user_struct(sel, arg1, 0);
5851 ret = do_select(nsel, inp, outp, exp, tvp);
5852 }
5853 break;
5854 #endif
5855 #ifdef TARGET_NR_pselect6
5856 case TARGET_NR_pselect6:
5857 {
5858 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
5859 fd_set rfds, wfds, efds;
5860 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
5861 struct timespec ts, *ts_ptr;
5862
5863 /*
5864 * The 6th arg is actually two args smashed together,
5865 * so we cannot use the C library.
5866 */
5867 sigset_t set;
5868 struct {
5869 sigset_t *set;
5870 size_t size;
5871 } sig, *sig_ptr;
5872
5873 abi_ulong arg_sigset, arg_sigsize, *arg7;
5874 target_sigset_t *target_sigset;
5875
5876 n = arg1;
5877 rfd_addr = arg2;
5878 wfd_addr = arg3;
5879 efd_addr = arg4;
5880 ts_addr = arg5;
5881
5882 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
5883 if (ret) {
5884 goto fail;
5885 }
5886 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
5887 if (ret) {
5888 goto fail;
5889 }
5890 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
5891 if (ret) {
5892 goto fail;
5893 }
5894
5895 /*
5896 * This takes a timespec, and not a timeval, so we cannot
5897 * use the do_select() helper ...
5898 */
5899 if (ts_addr) {
5900 if (target_to_host_timespec(&ts, ts_addr)) {
5901 goto efault;
5902 }
5903 ts_ptr = &ts;
5904 } else {
5905 ts_ptr = NULL;
5906 }
5907
5908 /* Extract the two packed args for the sigset */
5909 if (arg6) {
5910 sig_ptr = &sig;
5911 sig.size = _NSIG / 8;
5912
5913 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
5914 if (!arg7) {
5915 goto efault;
5916 }
5917 arg_sigset = tswapal(arg7[0]);
5918 arg_sigsize = tswapal(arg7[1]);
5919 unlock_user(arg7, arg6, 0);
5920
5921 if (arg_sigset) {
5922 sig.set = &set;
5923 if (arg_sigsize != sizeof(*target_sigset)) {
5924 /* Like the kernel, we enforce correct size sigsets */
5925 ret = -TARGET_EINVAL;
5926 goto fail;
5927 }
5928 target_sigset = lock_user(VERIFY_READ, arg_sigset,
5929 sizeof(*target_sigset), 1);
5930 if (!target_sigset) {
5931 goto efault;
5932 }
5933 target_to_host_sigset(&set, target_sigset);
5934 unlock_user(target_sigset, arg_sigset, 0);
5935 } else {
5936 sig.set = NULL;
5937 }
5938 } else {
5939 sig_ptr = NULL;
5940 }
5941
5942 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
5943 ts_ptr, sig_ptr));
5944
5945 if (!is_error(ret)) {
5946 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
5947 goto efault;
5948 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
5949 goto efault;
5950 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
5951 goto efault;
5952
5953 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
5954 goto efault;
5955 }
5956 }
5957 break;
5958 #endif
5959 case TARGET_NR_symlink:
5960 {
5961 void *p2;
5962 p = lock_user_string(arg1);
5963 p2 = lock_user_string(arg2);
5964 if (!p || !p2)
5965 ret = -TARGET_EFAULT;
5966 else
5967 ret = get_errno(symlink(p, p2));
5968 unlock_user(p2, arg2, 0);
5969 unlock_user(p, arg1, 0);
5970 }
5971 break;
5972 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5973 case TARGET_NR_symlinkat:
5974 {
5975 void *p2;
5976 p = lock_user_string(arg1);
5977 p2 = lock_user_string(arg3);
5978 if (!p || !p2)
5979 ret = -TARGET_EFAULT;
5980 else
5981 ret = get_errno(sys_symlinkat(p, arg2, p2));
5982 unlock_user(p2, arg3, 0);
5983 unlock_user(p, arg1, 0);
5984 }
5985 break;
5986 #endif
5987 #ifdef TARGET_NR_oldlstat
5988 case TARGET_NR_oldlstat:
5989 goto unimplemented;
5990 #endif
5991 case TARGET_NR_readlink:
5992 {
5993 void *p2, *temp;
5994 p = lock_user_string(arg1);
5995 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5996 if (!p || !p2)
5997 ret = -TARGET_EFAULT;
5998 else {
5999 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
6000 char real[PATH_MAX];
6001 temp = realpath(exec_path, real);
6002 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
6003 snprintf((char *)p2, arg3, "%s", real);
6004 }
6005 else
6006 ret = get_errno(readlink(path(p), p2, arg3));
6007 }
6008 unlock_user(p2, arg2, ret);
6009 unlock_user(p, arg1, 0);
6010 }
6011 break;
6012 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6013 case TARGET_NR_readlinkat:
6014 {
6015 void *p2;
6016 p = lock_user_string(arg2);
6017 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6018 if (!p || !p2)
6019 ret = -TARGET_EFAULT;
6020 else
6021 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6022 unlock_user(p2, arg3, ret);
6023 unlock_user(p, arg2, 0);
6024 }
6025 break;
6026 #endif
6027 #ifdef TARGET_NR_uselib
6028 case TARGET_NR_uselib:
6029 goto unimplemented;
6030 #endif
6031 #ifdef TARGET_NR_swapon
6032 case TARGET_NR_swapon:
6033 if (!(p = lock_user_string(arg1)))
6034 goto efault;
6035 ret = get_errno(swapon(p, arg2));
6036 unlock_user(p, arg1, 0);
6037 break;
6038 #endif
6039 case TARGET_NR_reboot:
6040 if (!(p = lock_user_string(arg4)))
6041 goto efault;
6042 ret = reboot(arg1, arg2, arg3, p);
6043 unlock_user(p, arg4, 0);
6044 break;
6045 #ifdef TARGET_NR_readdir
6046 case TARGET_NR_readdir:
6047 goto unimplemented;
6048 #endif
6049 #ifdef TARGET_NR_mmap
6050 case TARGET_NR_mmap:
6051 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6052 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6053 || defined(TARGET_S390X)
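/* On these 32-bit targets the old mmap() calling convention passes a pointer
   to a block of six abi_ulong arguments in guest memory, so the values are
   read out and byte-swapped before calling target_mmap(). */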
6054 {
6055 abi_ulong *v;
6056 abi_ulong v1, v2, v3, v4, v5, v6;
6057 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6058 goto efault;
6059 v1 = tswapal(v[0]);
6060 v2 = tswapal(v[1]);
6061 v3 = tswapal(v[2]);
6062 v4 = tswapal(v[3]);
6063 v5 = tswapal(v[4]);
6064 v6 = tswapal(v[5]);
6065 unlock_user(v, arg1, 0);
6066 ret = get_errno(target_mmap(v1, v2, v3,
6067 target_to_host_bitmask(v4, mmap_flags_tbl),
6068 v5, v6));
6069 }
6070 #else
6071 ret = get_errno(target_mmap(arg1, arg2, arg3,
6072 target_to_host_bitmask(arg4, mmap_flags_tbl),
6073 arg5,
6074 arg6));
6075 #endif
6076 break;
6077 #endif
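/* mmap2 takes its file offset in units of pages; it is converted to a byte
   offset below by shifting left by MMAP_SHIFT (12 unless the target headers
   already define it). */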
6078 #ifdef TARGET_NR_mmap2
6079 case TARGET_NR_mmap2:
6080 #ifndef MMAP_SHIFT
6081 #define MMAP_SHIFT 12
6082 #endif
6083 ret = get_errno(target_mmap(arg1, arg2, arg3,
6084 target_to_host_bitmask(arg4, mmap_flags_tbl),
6085 arg5,
6086 arg6 << MMAP_SHIFT));
6087 break;
6088 #endif
6089 case TARGET_NR_munmap:
6090 ret = get_errno(target_munmap(arg1, arg2));
6091 break;
6092 case TARGET_NR_mprotect:
6093 {
6094 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6095 /* Special hack to detect libc making the stack executable. */
6096 if ((arg3 & PROT_GROWSDOWN)
6097 && arg1 >= ts->info->stack_limit
6098 && arg1 <= ts->info->start_stack) {
6099 arg3 &= ~PROT_GROWSDOWN;
6100 arg2 = arg2 + arg1 - ts->info->stack_limit;
6101 arg1 = ts->info->stack_limit;
6102 }
6103 }
6104 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6105 break;
6106 #ifdef TARGET_NR_mremap
6107 case TARGET_NR_mremap:
6108 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6109 break;
6110 #endif
6111 /* ??? msync/mlock/munlock are broken for softmmu. */
6112 #ifdef TARGET_NR_msync
6113 case TARGET_NR_msync:
6114 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6115 break;
6116 #endif
6117 #ifdef TARGET_NR_mlock
6118 case TARGET_NR_mlock:
6119 ret = get_errno(mlock(g2h(arg1), arg2));
6120 break;
6121 #endif
6122 #ifdef TARGET_NR_munlock
6123 case TARGET_NR_munlock:
6124 ret = get_errno(munlock(g2h(arg1), arg2));
6125 break;
6126 #endif
6127 #ifdef TARGET_NR_mlockall
6128 case TARGET_NR_mlockall:
6129 ret = get_errno(mlockall(arg1));
6130 break;
6131 #endif
6132 #ifdef TARGET_NR_munlockall
6133 case TARGET_NR_munlockall:
6134 ret = get_errno(munlockall());
6135 break;
6136 #endif
6137 case TARGET_NR_truncate:
6138 if (!(p = lock_user_string(arg1)))
6139 goto efault;
6140 ret = get_errno(truncate(p, arg2));
6141 unlock_user(p, arg1, 0);
6142 break;
6143 case TARGET_NR_ftruncate:
6144 ret = get_errno(ftruncate(arg1, arg2));
6145 break;
6146 case TARGET_NR_fchmod:
6147 ret = get_errno(fchmod(arg1, arg2));
6148 break;
6149 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6150 case TARGET_NR_fchmodat:
6151 if (!(p = lock_user_string(arg2)))
6152 goto efault;
6153 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6154 unlock_user(p, arg2, 0);
6155 break;
6156 #endif
6157 case TARGET_NR_getpriority:
6158 /* libc does special remapping of the return value of
6159 * sys_getpriority() so it's just easiest to call
6160 * sys_getpriority() directly rather than through libc. */
6161 ret = get_errno(sys_getpriority(arg1, arg2));
6162 break;
6163 case TARGET_NR_setpriority:
6164 ret = get_errno(setpriority(arg1, arg2, arg3));
6165 break;
6166 #ifdef TARGET_NR_profil
6167 case TARGET_NR_profil:
6168 goto unimplemented;
6169 #endif
6170 case TARGET_NR_statfs:
6171 if (!(p = lock_user_string(arg1)))
6172 goto efault;
6173 ret = get_errno(statfs(path(p), &stfs));
6174 unlock_user(p, arg1, 0);
6175 convert_statfs:
6176 if (!is_error(ret)) {
6177 struct target_statfs *target_stfs;
6178
6179 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6180 goto efault;
6181 __put_user(stfs.f_type, &target_stfs->f_type);
6182 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6183 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6184 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6185 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6186 __put_user(stfs.f_files, &target_stfs->f_files);
6187 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6188 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6189 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6190 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6191 unlock_user_struct(target_stfs, arg2, 1);
6192 }
6193 break;
6194 case TARGET_NR_fstatfs:
6195 ret = get_errno(fstatfs(arg1, &stfs));
6196 goto convert_statfs;
6197 #ifdef TARGET_NR_statfs64
6198 case TARGET_NR_statfs64:
6199 if (!(p = lock_user_string(arg1)))
6200 goto efault;
6201 ret = get_errno(statfs(path(p), &stfs));
6202 unlock_user(p, arg1, 0);
6203 convert_statfs64:
6204 if (!is_error(ret)) {
6205 struct target_statfs64 *target_stfs;
6206
6207 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6208 goto efault;
6209 __put_user(stfs.f_type, &target_stfs->f_type);
6210 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6211 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6212 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6213 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6214 __put_user(stfs.f_files, &target_stfs->f_files);
6215 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6216 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6217 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6218 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6219 unlock_user_struct(target_stfs, arg3, 1);
6220 }
6221 break;
6222 case TARGET_NR_fstatfs64:
6223 ret = get_errno(fstatfs(arg1, &stfs));
6224 goto convert_statfs64;
6225 #endif
6226 #ifdef TARGET_NR_ioperm
6227 case TARGET_NR_ioperm:
6228 goto unimplemented;
6229 #endif
6230 #ifdef TARGET_NR_socketcall
6231 case TARGET_NR_socketcall:
6232 ret = do_socketcall(arg1, arg2);
6233 break;
6234 #endif
6235 #ifdef TARGET_NR_accept
6236 case TARGET_NR_accept:
6237 ret = do_accept(arg1, arg2, arg3);
6238 break;
6239 #endif
6240 #ifdef TARGET_NR_bind
6241 case TARGET_NR_bind:
6242 ret = do_bind(arg1, arg2, arg3);
6243 break;
6244 #endif
6245 #ifdef TARGET_NR_connect
6246 case TARGET_NR_connect:
6247 ret = do_connect(arg1, arg2, arg3);
6248 break;
6249 #endif
6250 #ifdef TARGET_NR_getpeername
6251 case TARGET_NR_getpeername:
6252 ret = do_getpeername(arg1, arg2, arg3);
6253 break;
6254 #endif
6255 #ifdef TARGET_NR_getsockname
6256 case TARGET_NR_getsockname:
6257 ret = do_getsockname(arg1, arg2, arg3);
6258 break;
6259 #endif
6260 #ifdef TARGET_NR_getsockopt
6261 case TARGET_NR_getsockopt:
6262 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6263 break;
6264 #endif
6265 #ifdef TARGET_NR_listen
6266 case TARGET_NR_listen:
6267 ret = get_errno(listen(arg1, arg2));
6268 break;
6269 #endif
6270 #ifdef TARGET_NR_recv
6271 case TARGET_NR_recv:
6272 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6273 break;
6274 #endif
6275 #ifdef TARGET_NR_recvfrom
6276 case TARGET_NR_recvfrom:
6277 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6278 break;
6279 #endif
6280 #ifdef TARGET_NR_recvmsg
6281 case TARGET_NR_recvmsg:
6282 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6283 break;
6284 #endif
6285 #ifdef TARGET_NR_send
6286 case TARGET_NR_send:
6287 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6288 break;
6289 #endif
6290 #ifdef TARGET_NR_sendmsg
6291 case TARGET_NR_sendmsg:
6292 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6293 break;
6294 #endif
6295 #ifdef TARGET_NR_sendto
6296 case TARGET_NR_sendto:
6297 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6298 break;
6299 #endif
6300 #ifdef TARGET_NR_shutdown
6301 case TARGET_NR_shutdown:
6302 ret = get_errno(shutdown(arg1, arg2));
6303 break;
6304 #endif
6305 #ifdef TARGET_NR_socket
6306 case TARGET_NR_socket:
6307 ret = do_socket(arg1, arg2, arg3);
6308 break;
6309 #endif
6310 #ifdef TARGET_NR_socketpair
6311 case TARGET_NR_socketpair:
6312 ret = do_socketpair(arg1, arg2, arg3, arg4);
6313 break;
6314 #endif
6315 #ifdef TARGET_NR_setsockopt
6316 case TARGET_NR_setsockopt:
6317 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6318 break;
6319 #endif
6320
6321 case TARGET_NR_syslog:
6322 if (!(p = lock_user_string(arg2)))
6323 goto efault;
6324 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6325 unlock_user(p, arg2, 0);
6326 break;
6327
6328 case TARGET_NR_setitimer:
6329 {
6330 struct itimerval value, ovalue, *pvalue;
6331
6332 if (arg2) {
6333 pvalue = &value;
6334 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6335 || copy_from_user_timeval(&pvalue->it_value,
6336 arg2 + sizeof(struct target_timeval)))
6337 goto efault;
6338 } else {
6339 pvalue = NULL;
6340 }
6341 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6342 if (!is_error(ret) && arg3) {
6343 if (copy_to_user_timeval(arg3,
6344 &ovalue.it_interval)
6345 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6346 &ovalue.it_value))
6347 goto efault;
6348 }
6349 }
6350 break;
6351 case TARGET_NR_getitimer:
6352 {
6353 struct itimerval value;
6354
6355 ret = get_errno(getitimer(arg1, &value));
6356 if (!is_error(ret) && arg2) {
6357 if (copy_to_user_timeval(arg2,
6358 &value.it_interval)
6359 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6360 &value.it_value))
6361 goto efault;
6362 }
6363 }
6364 break;
6365 case TARGET_NR_stat:
6366 if (!(p = lock_user_string(arg1)))
6367 goto efault;
6368 ret = get_errno(stat(path(p), &st));
6369 unlock_user(p, arg1, 0);
6370 goto do_stat;
6371 case TARGET_NR_lstat:
6372 if (!(p = lock_user_string(arg1)))
6373 goto efault;
6374 ret = get_errno(lstat(path(p), &st));
6375 unlock_user(p, arg1, 0);
6376 goto do_stat;
6377 case TARGET_NR_fstat:
6378 {
6379 ret = get_errno(fstat(arg1, &st));
6380 do_stat:
6381 if (!is_error(ret)) {
6382 struct target_stat *target_st;
6383
6384 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6385 goto efault;
6386 memset(target_st, 0, sizeof(*target_st));
6387 __put_user(st.st_dev, &target_st->st_dev);
6388 __put_user(st.st_ino, &target_st->st_ino);
6389 __put_user(st.st_mode, &target_st->st_mode);
6390 __put_user(st.st_uid, &target_st->st_uid);
6391 __put_user(st.st_gid, &target_st->st_gid);
6392 __put_user(st.st_nlink, &target_st->st_nlink);
6393 __put_user(st.st_rdev, &target_st->st_rdev);
6394 __put_user(st.st_size, &target_st->st_size);
6395 __put_user(st.st_blksize, &target_st->st_blksize);
6396 __put_user(st.st_blocks, &target_st->st_blocks);
6397 __put_user(st.st_atime, &target_st->target_st_atime);
6398 __put_user(st.st_mtime, &target_st->target_st_mtime);
6399 __put_user(st.st_ctime, &target_st->target_st_ctime);
6400 unlock_user_struct(target_st, arg2, 1);
6401 }
6402 }
6403 break;
6404 #ifdef TARGET_NR_olduname
6405 case TARGET_NR_olduname:
6406 goto unimplemented;
6407 #endif
6408 #ifdef TARGET_NR_iopl
6409 case TARGET_NR_iopl:
6410 goto unimplemented;
6411 #endif
6412 case TARGET_NR_vhangup:
6413 ret = get_errno(vhangup());
6414 break;
6415 #ifdef TARGET_NR_idle
6416 case TARGET_NR_idle:
6417 goto unimplemented;
6418 #endif
6419 #ifdef TARGET_NR_syscall
6420 case TARGET_NR_syscall:
6421 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6422 arg6, arg7, arg8, 0);
6423 break;
6424 #endif
6425 case TARGET_NR_wait4:
6426 {
6427 int status;
6428 abi_long status_ptr = arg2;
6429 struct rusage rusage, *rusage_ptr;
6430 abi_ulong target_rusage = arg4;
6431 if (target_rusage)
6432 rusage_ptr = &rusage;
6433 else
6434 rusage_ptr = NULL;
6435 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6436 if (!is_error(ret)) {
6437 if (status_ptr && ret) {
6438 status = host_to_target_waitstatus(status);
6439 if (put_user_s32(status, status_ptr))
6440 goto efault;
6441 }
6442 if (target_rusage)
6443 host_to_target_rusage(target_rusage, &rusage);
6444 }
6445 }
6446 break;
6447 #ifdef TARGET_NR_swapoff
6448 case TARGET_NR_swapoff:
6449 if (!(p = lock_user_string(arg1)))
6450 goto efault;
6451 ret = get_errno(swapoff(p));
6452 unlock_user(p, arg1, 0);
6453 break;
6454 #endif
6455 case TARGET_NR_sysinfo:
6456 {
6457 struct target_sysinfo *target_value;
6458 struct sysinfo value;
6459 ret = get_errno(sysinfo(&value));
6460 if (!is_error(ret) && arg1)
6461 {
6462 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6463 goto efault;
6464 __put_user(value.uptime, &target_value->uptime);
6465 __put_user(value.loads[0], &target_value->loads[0]);
6466 __put_user(value.loads[1], &target_value->loads[1]);
6467 __put_user(value.loads[2], &target_value->loads[2]);
6468 __put_user(value.totalram, &target_value->totalram);
6469 __put_user(value.freeram, &target_value->freeram);
6470 __put_user(value.sharedram, &target_value->sharedram);
6471 __put_user(value.bufferram, &target_value->bufferram);
6472 __put_user(value.totalswap, &target_value->totalswap);
6473 __put_user(value.freeswap, &target_value->freeswap);
6474 __put_user(value.procs, &target_value->procs);
6475 __put_user(value.totalhigh, &target_value->totalhigh);
6476 __put_user(value.freehigh, &target_value->freehigh);
6477 __put_user(value.mem_unit, &target_value->mem_unit);
6478 unlock_user_struct(target_value, arg1, 1);
6479 }
6480 }
6481 break;
6482 #ifdef TARGET_NR_ipc
6483 case TARGET_NR_ipc:
6484 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6485 break;
6486 #endif
6487 #ifdef TARGET_NR_semget
6488 case TARGET_NR_semget:
6489 ret = get_errno(semget(arg1, arg2, arg3));
6490 break;
6491 #endif
6492 #ifdef TARGET_NR_semop
6493 case TARGET_NR_semop:
6494 ret = get_errno(do_semop(arg1, arg2, arg3));
6495 break;
6496 #endif
6497 #ifdef TARGET_NR_semctl
6498 case TARGET_NR_semctl:
6499 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6500 break;
6501 #endif
6502 #ifdef TARGET_NR_msgctl
6503 case TARGET_NR_msgctl:
6504 ret = do_msgctl(arg1, arg2, arg3);
6505 break;
6506 #endif
6507 #ifdef TARGET_NR_msgget
6508 case TARGET_NR_msgget:
6509 ret = get_errno(msgget(arg1, arg2));
6510 break;
6511 #endif
6512 #ifdef TARGET_NR_msgrcv
6513 case TARGET_NR_msgrcv:
6514 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6515 break;
6516 #endif
6517 #ifdef TARGET_NR_msgsnd
6518 case TARGET_NR_msgsnd:
6519 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6520 break;
6521 #endif
6522 #ifdef TARGET_NR_shmget
6523 case TARGET_NR_shmget:
6524 ret = get_errno(shmget(arg1, arg2, arg3));
6525 break;
6526 #endif
6527 #ifdef TARGET_NR_shmctl
6528 case TARGET_NR_shmctl:
6529 ret = do_shmctl(arg1, arg2, arg3);
6530 break;
6531 #endif
6532 #ifdef TARGET_NR_shmat
6533 case TARGET_NR_shmat:
6534 ret = do_shmat(arg1, arg2, arg3);
6535 break;
6536 #endif
6537 #ifdef TARGET_NR_shmdt
6538 case TARGET_NR_shmdt:
6539 ret = do_shmdt(arg1);
6540 break;
6541 #endif
6542 case TARGET_NR_fsync:
6543 ret = get_errno(fsync(arg1));
6544 break;
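/* Targets disagree on the register order of the clone() arguments (notably
   the tls and child-tid pointers), so do_fork() is invoked with a per-target
   argument order below. */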
6545 case TARGET_NR_clone:
6546 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6547 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6548 #elif defined(TARGET_CRIS)
6549 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6550 #elif defined(TARGET_S390X)
6551 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6552 #else
6553 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6554 #endif
6555 break;
6556 #ifdef __NR_exit_group
6557 /* new thread calls */
6558 case TARGET_NR_exit_group:
6559 #ifdef TARGET_GPROF
6560 _mcleanup();
6561 #endif
6562 gdb_exit(cpu_env, arg1);
6563 ret = get_errno(exit_group(arg1));
6564 break;
6565 #endif
6566 case TARGET_NR_setdomainname:
6567 if (!(p = lock_user_string(arg1)))
6568 goto efault;
6569 ret = get_errno(setdomainname(p, arg2));
6570 unlock_user(p, arg1, 0);
6571 break;
6572 case TARGET_NR_uname:
6573 /* no need to transcode because we use the linux syscall */
6574 {
6575 struct new_utsname * buf;
6576
6577 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6578 goto efault;
6579 ret = get_errno(sys_uname(buf));
6580 if (!is_error(ret)) {
6581 /* Overwrite the native machine name with whatever is being
6582 emulated. */
6583 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6584 /* Allow the user to override the reported release. */
6585 if (qemu_uname_release && *qemu_uname_release)
6586 strcpy (buf->release, qemu_uname_release);
6587 }
6588 unlock_user_struct(buf, arg1, 1);
6589 }
6590 break;
6591 #ifdef TARGET_I386
6592 case TARGET_NR_modify_ldt:
6593 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6594 break;
6595 #if !defined(TARGET_X86_64)
6596 case TARGET_NR_vm86old:
6597 goto unimplemented;
6598 case TARGET_NR_vm86:
6599 ret = do_vm86(cpu_env, arg1, arg2);
6600 break;
6601 #endif
6602 #endif
6603 case TARGET_NR_adjtimex:
6604 goto unimplemented;
6605 #ifdef TARGET_NR_create_module
6606 case TARGET_NR_create_module:
6607 #endif
6608 case TARGET_NR_init_module:
6609 case TARGET_NR_delete_module:
6610 #ifdef TARGET_NR_get_kernel_syms
6611 case TARGET_NR_get_kernel_syms:
6612 #endif
6613 goto unimplemented;
6614 case TARGET_NR_quotactl:
6615 goto unimplemented;
6616 case TARGET_NR_getpgid:
6617 ret = get_errno(getpgid(arg1));
6618 break;
6619 case TARGET_NR_fchdir:
6620 ret = get_errno(fchdir(arg1));
6621 break;
6622 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6623 case TARGET_NR_bdflush:
6624 goto unimplemented;
6625 #endif
6626 #ifdef TARGET_NR_sysfs
6627 case TARGET_NR_sysfs:
6628 goto unimplemented;
6629 #endif
6630 case TARGET_NR_personality:
6631 ret = get_errno(personality(arg1));
6632 break;
6633 #ifdef TARGET_NR_afs_syscall
6634 case TARGET_NR_afs_syscall:
6635 goto unimplemented;
6636 #endif
6637 #ifdef TARGET_NR__llseek /* Not on alpha */
6638 case TARGET_NR__llseek:
6639 {
6640 int64_t res;
6641 #if !defined(__NR_llseek)
6642 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6643 if (res == -1) {
6644 ret = get_errno(res);
6645 } else {
6646 ret = 0;
6647 }
6648 #else
6649 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6650 #endif
6651 if ((ret == 0) && put_user_s64(res, arg4)) {
6652 goto efault;
6653 }
6654 }
6655 break;
6656 #endif
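/* getdents: when the target ABI is 32-bit but the host long is 64-bit, the
   host linux_dirent entries are repacked one by one into the narrower
   target_dirent layout; otherwise the buffer is only byte-swapped in place. */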
6657 case TARGET_NR_getdents:
6658 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6659 {
6660 struct target_dirent *target_dirp;
6661 struct linux_dirent *dirp;
6662 abi_long count = arg3;
6663
6664 dirp = malloc(count);
6665 if (!dirp) {
6666 ret = -TARGET_ENOMEM;
6667 goto fail;
6668 }
6669
6670 ret = get_errno(sys_getdents(arg1, dirp, count));
6671 if (!is_error(ret)) {
6672 struct linux_dirent *de;
6673 struct target_dirent *tde;
6674 int len = ret;
6675 int reclen, treclen;
6676 int count1, tnamelen;
6677
6678 count1 = 0;
6679 de = dirp;
6680 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6681 goto efault;
6682 tde = target_dirp;
6683 while (len > 0) {
6684 reclen = de->d_reclen;
6685 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6686 tde->d_reclen = tswap16(treclen);
6687 tde->d_ino = tswapal(de->d_ino);
6688 tde->d_off = tswapal(de->d_off);
6689 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6690 if (tnamelen > 256)
6691 tnamelen = 256;
6692 /* XXX: may not be correct */
6693 pstrcpy(tde->d_name, tnamelen, de->d_name);
6694 de = (struct linux_dirent *)((char *)de + reclen);
6695 len -= reclen;
6696 tde = (struct target_dirent *)((char *)tde + treclen);
6697 count1 += treclen;
6698 }
6699 ret = count1;
6700 unlock_user(target_dirp, arg2, ret);
6701 }
6702 free(dirp);
6703 }
6704 #else
6705 {
6706 struct linux_dirent *dirp;
6707 abi_long count = arg3;
6708
6709 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6710 goto efault;
6711 ret = get_errno(sys_getdents(arg1, dirp, count));
6712 if (!is_error(ret)) {
6713 struct linux_dirent *de;
6714 int len = ret;
6715 int reclen;
6716 de = dirp;
6717 while (len > 0) {
6718 reclen = de->d_reclen;
6719 if (reclen > len)
6720 break;
6721 de->d_reclen = tswap16(reclen);
6722 tswapls(&de->d_ino);
6723 tswapls(&de->d_off);
6724 de = (struct linux_dirent *)((char *)de + reclen);
6725 len -= reclen;
6726 }
6727 }
6728 unlock_user(dirp, arg2, ret);
6729 }
6730 #endif
6731 break;
6732 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6733 case TARGET_NR_getdents64:
6734 {
6735 struct linux_dirent64 *dirp;
6736 abi_long count = arg3;
6737 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6738 goto efault;
6739 ret = get_errno(sys_getdents64(arg1, dirp, count));
6740 if (!is_error(ret)) {
6741 struct linux_dirent64 *de;
6742 int len = ret;
6743 int reclen;
6744 de = dirp;
6745 while (len > 0) {
6746 reclen = de->d_reclen;
6747 if (reclen > len)
6748 break;
6749 de->d_reclen = tswap16(reclen);
6750 tswap64s((uint64_t *)&de->d_ino);
6751 tswap64s((uint64_t *)&de->d_off);
6752 de = (struct linux_dirent64 *)((char *)de + reclen);
6753 len -= reclen;
6754 }
6755 }
6756 unlock_user(dirp, arg2, ret);
6757 }
6758 break;
6759 #endif /* TARGET_NR_getdents64 */
6760 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6761 #ifdef TARGET_S390X
6762 case TARGET_NR_select:
6763 #else
6764 case TARGET_NR__newselect:
6765 #endif
6766 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6767 break;
6768 #endif
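/* poll and ppoll share this block: the guest pollfd array is converted
   element by element, and for ppoll the optional timespec and sigset
   arguments are converted as well before calling sys_ppoll(). */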
6769 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6770 # ifdef TARGET_NR_poll
6771 case TARGET_NR_poll:
6772 # endif
6773 # ifdef TARGET_NR_ppoll
6774 case TARGET_NR_ppoll:
6775 # endif
6776 {
6777 struct target_pollfd *target_pfd;
6778 unsigned int nfds = arg2;
6779 int timeout = arg3;
6780 struct pollfd *pfd;
6781 unsigned int i;
6782
6783 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6784 if (!target_pfd)
6785 goto efault;
6786
6787 pfd = alloca(sizeof(struct pollfd) * nfds);
6788 for(i = 0; i < nfds; i++) {
6789 pfd[i].fd = tswap32(target_pfd[i].fd);
6790 pfd[i].events = tswap16(target_pfd[i].events);
6791 }
6792
6793 # ifdef TARGET_NR_ppoll
6794 if (num == TARGET_NR_ppoll) {
6795 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6796 target_sigset_t *target_set;
6797 sigset_t _set, *set = &_set;
6798
6799 if (arg3) {
6800 if (target_to_host_timespec(timeout_ts, arg3)) {
6801 unlock_user(target_pfd, arg1, 0);
6802 goto efault;
6803 }
6804 } else {
6805 timeout_ts = NULL;
6806 }
6807
6808 if (arg4) {
6809 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6810 if (!target_set) {
6811 unlock_user(target_pfd, arg1, 0);
6812 goto efault;
6813 }
6814 target_to_host_sigset(set, target_set);
6815 } else {
6816 set = NULL;
6817 }
6818
6819 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6820
6821 if (!is_error(ret) && arg3) {
6822 host_to_target_timespec(arg3, timeout_ts);
6823 }
6824 if (arg4) {
6825 unlock_user(target_set, arg4, 0);
6826 }
6827 } else
6828 # endif
6829 ret = get_errno(poll(pfd, nfds, timeout));
6830
6831 if (!is_error(ret)) {
6832 for(i = 0; i < nfds; i++) {
6833 target_pfd[i].revents = tswap16(pfd[i].revents);
6834 }
6835 }
6836 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6837 }
6838 break;
6839 #endif
6840 case TARGET_NR_flock:
6841 /* NOTE: the flock constant seems to be the same for every
6842 Linux platform */
6843 ret = get_errno(flock(arg1, arg2));
6844 break;
6845 case TARGET_NR_readv:
6846 {
6847 int count = arg3;
6848 struct iovec *vec;
6849
6850 vec = alloca(count * sizeof(struct iovec));
6851 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6852 goto efault;
6853 ret = get_errno(readv(arg1, vec, count));
6854 unlock_iovec(vec, arg2, count, 1);
6855 }
6856 break;
6857 case TARGET_NR_writev:
6858 {
6859 int count = arg3;
6860 struct iovec *vec;
6861
6862 vec = alloca(count * sizeof(struct iovec));
6863 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6864 goto efault;
6865 ret = get_errno(writev(arg1, vec, count));
6866 unlock_iovec(vec, arg2, count, 0);
6867 }
6868 break;
6869 case TARGET_NR_getsid:
6870 ret = get_errno(getsid(arg1));
6871 break;
6872 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6873 case TARGET_NR_fdatasync:
6874 ret = get_errno(fdatasync(arg1));
6875 break;
6876 #endif
6877 case TARGET_NR__sysctl:
6878 /* We don't implement this, but ENOTDIR is always a safe
6879 return value. */
6880 ret = -TARGET_ENOTDIR;
6881 break;
6882 case TARGET_NR_sched_getaffinity:
6883 {
6884 unsigned int mask_size;
6885 unsigned long *mask;
6886
6887 /*
6888 * sched_getaffinity needs multiples of ulong, so need to take
6889 * care of mismatches between target ulong and host ulong sizes.
6890 */
6891 if (arg2 & (sizeof(abi_ulong) - 1)) {
6892 ret = -TARGET_EINVAL;
6893 break;
6894 }
6895 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6896
6897 mask = alloca(mask_size);
6898 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6899
6900 if (!is_error(ret)) {
6901 if (copy_to_user(arg3, mask, ret)) {
6902 goto efault;
6903 }
6904 }
6905 }
6906 break;
6907 case TARGET_NR_sched_setaffinity:
6908 {
6909 unsigned int mask_size;
6910 unsigned long *mask;
6911
6912 /*
6913 * sched_setaffinity needs multiples of ulong, so need to take
6914 * care of mismatches between target ulong and host ulong sizes.
6915 */
6916 if (arg2 & (sizeof(abi_ulong) - 1)) {
6917 ret = -TARGET_EINVAL;
6918 break;
6919 }
6920 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6921
6922 mask = alloca(mask_size);
6923 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6924 goto efault;
6925 }
6926 memcpy(mask, p, arg2);
6927 unlock_user_struct(p, arg3, 0);
6928
6929 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6930 }
6931 break;
6932 case TARGET_NR_sched_setparam:
6933 {
6934 struct sched_param *target_schp;
6935 struct sched_param schp;
6936
6937 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6938 goto efault;
6939 schp.sched_priority = tswap32(target_schp->sched_priority);
6940 unlock_user_struct(target_schp, arg2, 0);
6941 ret = get_errno(sched_setparam(arg1, &schp));
6942 }
6943 break;
6944 case TARGET_NR_sched_getparam:
6945 {
6946 struct sched_param *target_schp;
6947 struct sched_param schp;
6948 ret = get_errno(sched_getparam(arg1, &schp));
6949 if (!is_error(ret)) {
6950 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6951 goto efault;
6952 target_schp->sched_priority = tswap32(schp.sched_priority);
6953 unlock_user_struct(target_schp, arg2, 1);
6954 }
6955 }
6956 break;
6957 case TARGET_NR_sched_setscheduler:
6958 {
6959 struct sched_param *target_schp;
6960 struct sched_param schp;
6961 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6962 goto efault;
6963 schp.sched_priority = tswap32(target_schp->sched_priority);
6964 unlock_user_struct(target_schp, arg3, 0);
6965 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6966 }
6967 break;
6968 case TARGET_NR_sched_getscheduler:
6969 ret = get_errno(sched_getscheduler(arg1));
6970 break;
6971 case TARGET_NR_sched_yield:
6972 ret = get_errno(sched_yield());
6973 break;
6974 case TARGET_NR_sched_get_priority_max:
6975 ret = get_errno(sched_get_priority_max(arg1));
6976 break;
6977 case TARGET_NR_sched_get_priority_min:
6978 ret = get_errno(sched_get_priority_min(arg1));
6979 break;
6980 case TARGET_NR_sched_rr_get_interval:
6981 {
6982 struct timespec ts;
6983 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6984 if (!is_error(ret)) {
6985 host_to_target_timespec(arg2, &ts);
6986 }
6987 }
6988 break;
6989 case TARGET_NR_nanosleep:
6990 {
6991 struct timespec req, rem;
6992 target_to_host_timespec(&req, arg1);
6993 ret = get_errno(nanosleep(&req, &rem));
6994 if (is_error(ret) && arg2) {
6995 host_to_target_timespec(arg2, &rem);
6996 }
6997 }
6998 break;
6999 #ifdef TARGET_NR_query_module
7000 case TARGET_NR_query_module:
7001 goto unimplemented;
7002 #endif
7003 #ifdef TARGET_NR_nfsservctl
7004 case TARGET_NR_nfsservctl:
7005 goto unimplemented;
7006 #endif
7007 case TARGET_NR_prctl:
7008 switch (arg1)
7009 {
7010 case PR_GET_PDEATHSIG:
7011 {
7012 int deathsig;
7013 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7014 if (!is_error(ret) && arg2
7015 && put_user_ual(deathsig, arg2))
7016 goto efault;
7017 }
7018 break;
7019 default:
7020 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7021 break;
7022 }
7023 break;
7024 #ifdef TARGET_NR_arch_prctl
7025 case TARGET_NR_arch_prctl:
7026 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7027 ret = do_arch_prctl(cpu_env, arg1, arg2);
7028 break;
7029 #else
7030 goto unimplemented;
7031 #endif
7032 #endif
7033 #ifdef TARGET_NR_pread
7034 case TARGET_NR_pread:
7035 if (regpairs_aligned(cpu_env))
7036 arg4 = arg5;
7037 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7038 goto efault;
7039 ret = get_errno(pread(arg1, p, arg3, arg4));
7040 unlock_user(p, arg2, ret);
7041 break;
7042 case TARGET_NR_pwrite:
7043 if (regpairs_aligned(cpu_env))
7044 arg4 = arg5;
7045 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7046 goto efault;
7047 ret = get_errno(pwrite(arg1, p, arg3, arg4));
7048 unlock_user(p, arg2, 0);
7049 break;
7050 #endif
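/* pread64/pwrite64 receive the 64-bit file offset split across two syscall
   arguments; target_offset64() reassembles it before the host call. */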
7051 #ifdef TARGET_NR_pread64
7052 case TARGET_NR_pread64:
7053 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7054 goto efault;
7055 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7056 unlock_user(p, arg2, ret);
7057 break;
7058 case TARGET_NR_pwrite64:
7059 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7060 goto efault;
7061 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7062 unlock_user(p, arg2, 0);
7063 break;
7064 #endif
7065 case TARGET_NR_getcwd:
7066 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7067 goto efault;
7068 ret = get_errno(sys_getcwd1(p, arg2));
7069 unlock_user(p, arg1, ret);
7070 break;
7071 case TARGET_NR_capget:
7072 goto unimplemented;
7073 case TARGET_NR_capset:
7074 goto unimplemented;
7075 case TARGET_NR_sigaltstack:
7076 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7077 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7078 defined(TARGET_M68K) || defined(TARGET_S390X)
7079 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7080 break;
7081 #else
7082 goto unimplemented;
7083 #endif
7084 case TARGET_NR_sendfile:
7085 goto unimplemented;
7086 #ifdef TARGET_NR_getpmsg
7087 case TARGET_NR_getpmsg:
7088 goto unimplemented;
7089 #endif
7090 #ifdef TARGET_NR_putpmsg
7091 case TARGET_NR_putpmsg:
7092 goto unimplemented;
7093 #endif
7094 #ifdef TARGET_NR_vfork
7095 case TARGET_NR_vfork:
7096 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7097 0, 0, 0, 0));
7098 break;
7099 #endif
7100 #ifdef TARGET_NR_ugetrlimit
7101 case TARGET_NR_ugetrlimit:
7102 {
7103 struct rlimit rlim;
7104 int resource = target_to_host_resource(arg1);
7105 ret = get_errno(getrlimit(resource, &rlim));
7106 if (!is_error(ret)) {
7107 struct target_rlimit *target_rlim;
7108 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7109 goto efault;
7110 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7111 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7112 unlock_user_struct(target_rlim, arg2, 1);
7113 }
7114 break;
7115 }
7116 #endif
7117 #ifdef TARGET_NR_truncate64
7118 case TARGET_NR_truncate64:
7119 if (!(p = lock_user_string(arg1)))
7120 goto efault;
7121 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7122 unlock_user(p, arg1, 0);
7123 break;
7124 #endif
7125 #ifdef TARGET_NR_ftruncate64
7126 case TARGET_NR_ftruncate64:
7127 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7128 break;
7129 #endif
7130 #ifdef TARGET_NR_stat64
7131 case TARGET_NR_stat64:
7132 if (!(p = lock_user_string(arg1)))
7133 goto efault;
7134 ret = get_errno(stat(path(p), &st));
7135 unlock_user(p, arg1, 0);
7136 if (!is_error(ret))
7137 ret = host_to_target_stat64(cpu_env, arg2, &st);
7138 break;
7139 #endif
7140 #ifdef TARGET_NR_lstat64
7141 case TARGET_NR_lstat64:
7142 if (!(p = lock_user_string(arg1)))
7143 goto efault;
7144 ret = get_errno(lstat(path(p), &st));
7145 unlock_user(p, arg1, 0);
7146 if (!is_error(ret))
7147 ret = host_to_target_stat64(cpu_env, arg2, &st);
7148 break;
7149 #endif
7150 #ifdef TARGET_NR_fstat64
7151 case TARGET_NR_fstat64:
7152 ret = get_errno(fstat(arg1, &st));
7153 if (!is_error(ret))
7154 ret = host_to_target_stat64(cpu_env, arg2, &st);
7155 break;
7156 #endif
7157 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7158 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7159 #ifdef TARGET_NR_fstatat64
7160 case TARGET_NR_fstatat64:
7161 #endif
7162 #ifdef TARGET_NR_newfstatat
7163 case TARGET_NR_newfstatat:
7164 #endif
7165 if (!(p = lock_user_string(arg2)))
7166 goto efault;
7167 #ifdef __NR_fstatat64
7168 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7169 #else
7170 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7171 #endif
7172 if (!is_error(ret))
7173 ret = host_to_target_stat64(cpu_env, arg3, &st);
7174 break;
7175 #endif
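/* The *chown/*uid/*gid calls below use the low2high/high2low helpers to
   translate between the target's legacy 16-bit IDs and the host's full-width
   uid_t/gid_t values. */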
7176 case TARGET_NR_lchown:
7177 if (!(p = lock_user_string(arg1)))
7178 goto efault;
7179 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7180 unlock_user(p, arg1, 0);
7181 break;
7182 #ifdef TARGET_NR_getuid
7183 case TARGET_NR_getuid:
7184 ret = get_errno(high2lowuid(getuid()));
7185 break;
7186 #endif
7187 #ifdef TARGET_NR_getgid
7188 case TARGET_NR_getgid:
7189 ret = get_errno(high2lowgid(getgid()));
7190 break;
7191 #endif
7192 #ifdef TARGET_NR_geteuid
7193 case TARGET_NR_geteuid:
7194 ret = get_errno(high2lowuid(geteuid()));
7195 break;
7196 #endif
7197 #ifdef TARGET_NR_getegid
7198 case TARGET_NR_getegid:
7199 ret = get_errno(high2lowgid(getegid()));
7200 break;
7201 #endif
7202 case TARGET_NR_setreuid:
7203 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7204 break;
7205 case TARGET_NR_setregid:
7206 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7207 break;
7208 case TARGET_NR_getgroups:
7209 {
7210 int gidsetsize = arg1;
7211 target_id *target_grouplist;
7212 gid_t *grouplist;
7213 int i;
7214
7215 grouplist = alloca(gidsetsize * sizeof(gid_t));
7216 ret = get_errno(getgroups(gidsetsize, grouplist));
7217 if (gidsetsize == 0)
7218 break;
7219 if (!is_error(ret)) {
7220 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7221 if (!target_grouplist)
7222 goto efault;
7223 for(i = 0;i < ret; i++)
7224 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7225 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7226 }
7227 }
7228 break;
7229 case TARGET_NR_setgroups:
7230 {
7231 int gidsetsize = arg1;
7232 target_id *target_grouplist;
7233 gid_t *grouplist;
7234 int i;
7235
7236 grouplist = alloca(gidsetsize * sizeof(gid_t));
7237 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7238 if (!target_grouplist) {
7239 ret = -TARGET_EFAULT;
7240 goto fail;
7241 }
7242 for(i = 0;i < gidsetsize; i++)
7243 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7244 unlock_user(target_grouplist, arg2, 0);
7245 ret = get_errno(setgroups(gidsetsize, grouplist));
7246 }
7247 break;
7248 case TARGET_NR_fchown:
7249 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7250 break;
7251 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7252 case TARGET_NR_fchownat:
7253 if (!(p = lock_user_string(arg2)))
7254 goto efault;
7255 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7256 unlock_user(p, arg2, 0);
7257 break;
7258 #endif
7259 #ifdef TARGET_NR_setresuid
7260 case TARGET_NR_setresuid:
7261 ret = get_errno(setresuid(low2highuid(arg1),
7262 low2highuid(arg2),
7263 low2highuid(arg3)));
7264 break;
7265 #endif
7266 #ifdef TARGET_NR_getresuid
7267 case TARGET_NR_getresuid:
7268 {
7269 uid_t ruid, euid, suid;
7270 ret = get_errno(getresuid(&ruid, &euid, &suid));
7271 if (!is_error(ret)) {
7272 if (put_user_u16(high2lowuid(ruid), arg1)
7273 || put_user_u16(high2lowuid(euid), arg2)
7274 || put_user_u16(high2lowuid(suid), arg3))
7275 goto efault;
7276 }
7277 }
7278 break;
7279 #endif
7280 #ifdef TARGET_NR_getresgid
7281 case TARGET_NR_setresgid:
7282 ret = get_errno(setresgid(low2highgid(arg1),
7283 low2highgid(arg2),
7284 low2highgid(arg3)));
7285 break;
7286 #endif
7287 #ifdef TARGET_NR_getresgid
7288 case TARGET_NR_getresgid:
7289 {
7290 gid_t rgid, egid, sgid;
7291 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7292 if (!is_error(ret)) {
7293 if (put_user_u16(high2lowgid(rgid), arg1)
7294 || put_user_u16(high2lowgid(egid), arg2)
7295 || put_user_u16(high2lowgid(sgid), arg3))
7296 goto efault;
7297 }
7298 }
7299 break;
7300 #endif
7301 case TARGET_NR_chown:
7302 if (!(p = lock_user_string(arg1)))
7303 goto efault;
7304 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7305 unlock_user(p, arg1, 0);
7306 break;
7307 case TARGET_NR_setuid:
7308 ret = get_errno(setuid(low2highuid(arg1)));
7309 break;
7310 case TARGET_NR_setgid:
7311 ret = get_errno(setgid(low2highgid(arg1)));
7312 break;
7313 case TARGET_NR_setfsuid:
7314 ret = get_errno(setfsuid(arg1));
7315 break;
7316 case TARGET_NR_setfsgid:
7317 ret = get_errno(setfsgid(arg1));
7318 break;
7319
7320 #ifdef TARGET_NR_lchown32
7321 case TARGET_NR_lchown32:
7322 if (!(p = lock_user_string(arg1)))
7323 goto efault;
7324 ret = get_errno(lchown(p, arg2, arg3));
7325 unlock_user(p, arg1, 0);
7326 break;
7327 #endif
7328 #ifdef TARGET_NR_getuid32
7329 case TARGET_NR_getuid32:
7330 ret = get_errno(getuid());
7331 break;
7332 #endif
7333
7334 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7335 /* Alpha specific */
7336 case TARGET_NR_getxuid:
7337 {
7338 uid_t euid;
7339 euid=geteuid();
7340 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7341 }
7342 ret = get_errno(getuid());
7343 break;
7344 #endif
7345 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7346 /* Alpha specific */
7347 case TARGET_NR_getxgid:
7348 {
7349 gid_t egid;
7350 egid=getegid();
7351 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7352 }
7353 ret = get_errno(getgid());
7354 break;
7355 #endif
7356 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7357 /* Alpha specific */
7358 case TARGET_NR_osf_getsysinfo:
7359 ret = -TARGET_EOPNOTSUPP;
7360 switch (arg1) {
7361 case TARGET_GSI_IEEE_FP_CONTROL:
7362 {
7363 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7364
7365 /* Copied from linux ieee_fpcr_to_swcr. */
7366 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7367 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7368 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7369 | SWCR_TRAP_ENABLE_DZE
7370 | SWCR_TRAP_ENABLE_OVF);
7371 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7372 | SWCR_TRAP_ENABLE_INE);
7373 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7374 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7375
7376 if (put_user_u64 (swcr, arg2))
7377 goto efault;
7378 ret = 0;
7379 }
7380 break;
7381
7382 /* case GSI_IEEE_STATE_AT_SIGNAL:
7383 -- Not implemented in linux kernel.
7384 case GSI_UACPROC:
7385 -- Retrieves current unaligned access state; not much used.
7386 case GSI_PROC_TYPE:
7387 -- Retrieves implver information; surely not used.
7388 case GSI_GET_HWRPB:
7389 -- Grabs a copy of the HWRPB; surely not used.
7390 */
7391 }
7392 break;
7393 #endif
7394 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7395 /* Alpha specific */
7396 case TARGET_NR_osf_setsysinfo:
7397 ret = -TARGET_EOPNOTSUPP;
7398 switch (arg1) {
7399 case TARGET_SSI_IEEE_FP_CONTROL:
7400 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7401 {
7402 uint64_t swcr, fpcr, orig_fpcr;
7403
7404 if (get_user_u64 (swcr, arg2))
7405 goto efault;
7406 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7407 fpcr = orig_fpcr & FPCR_DYN_MASK;
7408
7409 /* Copied from linux ieee_swcr_to_fpcr. */
7410 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7411 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7412 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7413 | SWCR_TRAP_ENABLE_DZE
7414 | SWCR_TRAP_ENABLE_OVF)) << 48;
7415 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7416 | SWCR_TRAP_ENABLE_INE)) << 57;
7417 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7418 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7419
7420 cpu_alpha_store_fpcr (cpu_env, fpcr);
7421 ret = 0;
7422
7423 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7424 /* Old exceptions are not signaled. */
7425 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7426
7427 /* If any exceptions are set by this call and are unmasked,
7428 send a signal. */
7429 /* ??? FIXME */
7430 }
7431 }
7432 break;
7433
7434 /* case SSI_NVPAIRS:
7435 -- Used with SSIN_UACPROC to enable unaligned accesses.
7436 case SSI_IEEE_STATE_AT_SIGNAL:
7437 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7438 -- Not implemented in linux kernel
7439 */
7440 }
7441 break;
7442 #endif
7443 #ifdef TARGET_NR_osf_sigprocmask
7444 /* Alpha specific. */
7445 case TARGET_NR_osf_sigprocmask:
7446 {
7447 abi_ulong mask;
7448 int how;
7449 sigset_t set, oldset;
7450
7451 switch(arg1) {
7452 case TARGET_SIG_BLOCK:
7453 how = SIG_BLOCK;
7454 break;
7455 case TARGET_SIG_UNBLOCK:
7456 how = SIG_UNBLOCK;
7457 break;
7458 case TARGET_SIG_SETMASK:
7459 how = SIG_SETMASK;
7460 break;
7461 default:
7462 ret = -TARGET_EINVAL;
7463 goto fail;
7464 }
7465 mask = arg2;
7466 target_to_host_old_sigset(&set, &mask);
7467 sigprocmask(how, &set, &oldset);
7468 host_to_target_old_sigset(&mask, &oldset);
7469 ret = mask;
7470 }
7471 break;
7472 #endif
7473
7474 #ifdef TARGET_NR_getgid32
7475 case TARGET_NR_getgid32:
7476 ret = get_errno(getgid());
7477 break;
7478 #endif
7479 #ifdef TARGET_NR_geteuid32
7480 case TARGET_NR_geteuid32:
7481 ret = get_errno(geteuid());
7482 break;
7483 #endif
7484 #ifdef TARGET_NR_getegid32
7485 case TARGET_NR_getegid32:
7486 ret = get_errno(getegid());
7487 break;
7488 #endif
7489 #ifdef TARGET_NR_setreuid32
7490 case TARGET_NR_setreuid32:
7491 ret = get_errno(setreuid(arg1, arg2));
7492 break;
7493 #endif
7494 #ifdef TARGET_NR_setregid32
7495 case TARGET_NR_setregid32:
7496 ret = get_errno(setregid(arg1, arg2));
7497 break;
7498 #endif
7499 #ifdef TARGET_NR_getgroups32
7500 case TARGET_NR_getgroups32:
7501 {
7502 int gidsetsize = arg1;
7503 uint32_t *target_grouplist;
7504 gid_t *grouplist;
7505 int i;
7506
7507 grouplist = alloca(gidsetsize * sizeof(gid_t));
7508 ret = get_errno(getgroups(gidsetsize, grouplist));
7509 if (gidsetsize == 0)
7510 break;
7511 if (!is_error(ret)) {
7512 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7513 if (!target_grouplist) {
7514 ret = -TARGET_EFAULT;
7515 goto fail;
7516 }
7517 for(i = 0;i < ret; i++)
7518 target_grouplist[i] = tswap32(grouplist[i]);
7519 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7520 }
7521 }
7522 break;
7523 #endif
7524 #ifdef TARGET_NR_setgroups32
7525 case TARGET_NR_setgroups32:
7526 {
7527 int gidsetsize = arg1;
7528 uint32_t *target_grouplist;
7529 gid_t *grouplist;
7530 int i;
7531
7532 grouplist = alloca(gidsetsize * sizeof(gid_t));
7533 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7534 if (!target_grouplist) {
7535 ret = -TARGET_EFAULT;
7536 goto fail;
7537 }
7538 for(i = 0;i < gidsetsize; i++)
7539 grouplist[i] = tswap32(target_grouplist[i]);
7540 unlock_user(target_grouplist, arg2, 0);
7541 ret = get_errno(setgroups(gidsetsize, grouplist));
7542 }
7543 break;
7544 #endif
7545 #ifdef TARGET_NR_fchown32
7546 case TARGET_NR_fchown32:
7547 ret = get_errno(fchown(arg1, arg2, arg3));
7548 break;
7549 #endif
7550 #ifdef TARGET_NR_setresuid32
7551 case TARGET_NR_setresuid32:
7552 ret = get_errno(setresuid(arg1, arg2, arg3));
7553 break;
7554 #endif
7555 #ifdef TARGET_NR_getresuid32
7556 case TARGET_NR_getresuid32:
7557 {
7558 uid_t ruid, euid, suid;
7559 ret = get_errno(getresuid(&ruid, &euid, &suid));
7560 if (!is_error(ret)) {
7561 if (put_user_u32(ruid, arg1)
7562 || put_user_u32(euid, arg2)
7563 || put_user_u32(suid, arg3))
7564 goto efault;
7565 }
7566 }
7567 break;
7568 #endif
7569 #ifdef TARGET_NR_setresgid32
7570 case TARGET_NR_setresgid32:
7571 ret = get_errno(setresgid(arg1, arg2, arg3));
7572 break;
7573 #endif
7574 #ifdef TARGET_NR_getresgid32
7575 case TARGET_NR_getresgid32:
7576 {
7577 gid_t rgid, egid, sgid;
7578 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7579 if (!is_error(ret)) {
7580 if (put_user_u32(rgid, arg1)
7581 || put_user_u32(egid, arg2)
7582 || put_user_u32(sgid, arg3))
7583 goto efault;
7584 }
7585 }
7586 break;
7587 #endif
7588 #ifdef TARGET_NR_chown32
7589 case TARGET_NR_chown32:
7590 if (!(p = lock_user_string(arg1)))
7591 goto efault;
7592 ret = get_errno(chown(p, arg2, arg3));
7593 unlock_user(p, arg1, 0);
7594 break;
7595 #endif
7596 #ifdef TARGET_NR_setuid32
7597 case TARGET_NR_setuid32:
7598 ret = get_errno(setuid(arg1));
7599 break;
7600 #endif
7601 #ifdef TARGET_NR_setgid32
7602 case TARGET_NR_setgid32:
7603 ret = get_errno(setgid(arg1));
7604 break;
7605 #endif
7606 #ifdef TARGET_NR_setfsuid32
7607 case TARGET_NR_setfsuid32:
7608 ret = get_errno(setfsuid(arg1));
7609 break;
7610 #endif
7611 #ifdef TARGET_NR_setfsgid32
7612 case TARGET_NR_setfsgid32:
7613 ret = get_errno(setfsgid(arg1));
7614 break;
7615 #endif
7616
7617 case TARGET_NR_pivot_root:
7618 goto unimplemented;
7619 #ifdef TARGET_NR_mincore
7620 case TARGET_NR_mincore:
7621 {
7622 void *a;
7623 ret = -TARGET_EFAULT;
7624 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7625 goto efault;
7626 if (!(p = lock_user_string(arg3)))
7627 goto mincore_fail;
7628 ret = get_errno(mincore(a, arg2, p));
7629 unlock_user(p, arg3, ret);
7630 mincore_fail:
7631 unlock_user(a, arg1, 0);
7632 }
7633 break;
7634 #endif
7635 #ifdef TARGET_NR_arm_fadvise64_64
7636 case TARGET_NR_arm_fadvise64_64:
7637 {
7638 /*
7639 * arm_fadvise64_64 looks like fadvise64_64 but
7640 * with different argument order
7641 */
7642 abi_long temp;
7643 temp = arg3;
7644 arg3 = arg4;
7645 arg4 = temp;
7646 }
7647 #endif
7648 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7649 #ifdef TARGET_NR_fadvise64_64
7650 case TARGET_NR_fadvise64_64:
7651 #endif
7652 #ifdef TARGET_NR_fadvise64
7653 case TARGET_NR_fadvise64:
7654 #endif
7655 #ifdef TARGET_S390X
7656 switch (arg4) {
7657 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7658 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7659 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7660 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7661 default: break;
7662 }
7663 #endif
7664 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7665 break;
7666 #endif
7667 #ifdef TARGET_NR_madvise
7668 case TARGET_NR_madvise:
7669 /* A straight passthrough may not be safe because qemu sometimes
7670 turns private file-backed mappings into anonymous mappings.
7671 This will break MADV_DONTNEED.
7672 This is a hint, so ignoring and returning success is ok. */
7673 ret = get_errno(0);
7674 break;
7675 #endif
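/* On 32-bit ABIs, fcntl64's F_GETLK64/F_SETLK64/F_SETLKW64 commands convert
   between the guest's 64-bit flock layout (with a separately aligned EABI
   variant on ARM) and the host struct flock64. */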
7676 #if TARGET_ABI_BITS == 32
7677 case TARGET_NR_fcntl64:
7678 {
7679 int cmd;
7680 struct flock64 fl;
7681 struct target_flock64 *target_fl;
7682 #ifdef TARGET_ARM
7683 struct target_eabi_flock64 *target_efl;
7684 #endif
7685
7686 cmd = target_to_host_fcntl_cmd(arg2);
7687 if (cmd == -TARGET_EINVAL) {
7688 ret = cmd;
7689 break;
7690 }
7691
7692 switch(arg2) {
7693 case TARGET_F_GETLK64:
7694 #ifdef TARGET_ARM
7695 if (((CPUARMState *)cpu_env)->eabi) {
7696 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7697 goto efault;
7698 fl.l_type = tswap16(target_efl->l_type);
7699 fl.l_whence = tswap16(target_efl->l_whence);
7700 fl.l_start = tswap64(target_efl->l_start);
7701 fl.l_len = tswap64(target_efl->l_len);
7702 fl.l_pid = tswap32(target_efl->l_pid);
7703 unlock_user_struct(target_efl, arg3, 0);
7704 } else
7705 #endif
7706 {
7707 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7708 goto efault;
7709 fl.l_type = tswap16(target_fl->l_type);
7710 fl.l_whence = tswap16(target_fl->l_whence);
7711 fl.l_start = tswap64(target_fl->l_start);
7712 fl.l_len = tswap64(target_fl->l_len);
7713 fl.l_pid = tswap32(target_fl->l_pid);
7714 unlock_user_struct(target_fl, arg3, 0);
7715 }
7716 ret = get_errno(fcntl(arg1, cmd, &fl));
7717 if (ret == 0) {
7718 #ifdef TARGET_ARM
7719 if (((CPUARMState *)cpu_env)->eabi) {
7720 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7721 goto efault;
7722 target_efl->l_type = tswap16(fl.l_type);
7723 target_efl->l_whence = tswap16(fl.l_whence);
7724 target_efl->l_start = tswap64(fl.l_start);
7725 target_efl->l_len = tswap64(fl.l_len);
7726 target_efl->l_pid = tswap32(fl.l_pid);
7727 unlock_user_struct(target_efl, arg3, 1);
7728 } else
7729 #endif
7730 {
7731 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7732 goto efault;
7733 target_fl->l_type = tswap16(fl.l_type);
7734 target_fl->l_whence = tswap16(fl.l_whence);
7735 target_fl->l_start = tswap64(fl.l_start);
7736 target_fl->l_len = tswap64(fl.l_len);
7737 target_fl->l_pid = tswap32(fl.l_pid);
7738 unlock_user_struct(target_fl, arg3, 1);
7739 }
7740 }
7741 break;
7742
7743 case TARGET_F_SETLK64:
7744 case TARGET_F_SETLKW64:
7745 #ifdef TARGET_ARM
7746 if (((CPUARMState *)cpu_env)->eabi) {
7747 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7748 goto efault;
7749 fl.l_type = tswap16(target_efl->l_type);
7750 fl.l_whence = tswap16(target_efl->l_whence);
7751 fl.l_start = tswap64(target_efl->l_start);
7752 fl.l_len = tswap64(target_efl->l_len);
7753 fl.l_pid = tswap32(target_efl->l_pid);
7754 unlock_user_struct(target_efl, arg3, 0);
7755 } else
7756 #endif
7757 {
7758 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7759 goto efault;
7760 fl.l_type = tswap16(target_fl->l_type);
7761 fl.l_whence = tswap16(target_fl->l_whence);
7762 fl.l_start = tswap64(target_fl->l_start);
7763 fl.l_len = tswap64(target_fl->l_len);
7764 fl.l_pid = tswap32(target_fl->l_pid);
7765 unlock_user_struct(target_fl, arg3, 0);
7766 }
7767 ret = get_errno(fcntl(arg1, cmd, &fl));
7768 break;
7769 default:
7770 ret = do_fcntl(arg1, arg2, arg3);
7771 break;
7772 }
7773 break;
7774 }
7775 #endif
7776 #ifdef TARGET_NR_cacheflush
7777 case TARGET_NR_cacheflush:
7778 /* self-modifying code is handled automatically, so nothing needed */
7779 ret = 0;
7780 break;
7781 #endif
7782 #ifdef TARGET_NR_security
7783 case TARGET_NR_security:
7784 goto unimplemented;
7785 #endif
7786 #ifdef TARGET_NR_getpagesize
7787 case TARGET_NR_getpagesize:
7788 ret = TARGET_PAGE_SIZE;
7789 break;
7790 #endif
7791 case TARGET_NR_gettid:
7792 ret = get_errno(gettid());
7793 break;
7794 #ifdef TARGET_NR_readahead
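/* On 32-bit ABIs the 64-bit offset arrives as a register pair, which some targets
   align to an even register (regpairs_aligned); reassemble it before calling readahead(). */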
7795 case TARGET_NR_readahead:
7796 #if TARGET_ABI_BITS == 32
7797 if (regpairs_aligned(cpu_env)) {
7798 arg2 = arg3;
7799 arg3 = arg4;
7800 arg4 = arg5;
7801 }
7802 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7803 #else
7804 ret = get_errno(readahead(arg1, arg2, arg3));
7805 #endif
7806 break;
7807 #endif
7808 #ifdef CONFIG_ATTR
7809 #ifdef TARGET_NR_setxattr
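/* Extended attribute syscalls: lock the path, name and value buffers in guest memory
   and pass them through to the host *xattr() calls; names and values are opaque bytes. */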
7810 case TARGET_NR_listxattr:
7811 case TARGET_NR_llistxattr:
7812 {
7813 void *p, *b = 0;
7814 if (arg2) {
7815 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
7816 if (!b) {
7817 ret = -TARGET_EFAULT;
7818 break;
7819 }
7820 }
7821 p = lock_user_string(arg1);
7822 if (p) {
7823 if (num == TARGET_NR_listxattr) {
7824 ret = get_errno(listxattr(p, b, arg3));
7825 } else {
7826 ret = get_errno(llistxattr(p, b, arg3));
7827 }
7828 } else {
7829 ret = -TARGET_EFAULT;
7830 }
7831 unlock_user(p, arg1, 0);
7832 unlock_user(b, arg2, arg3);
7833 break;
7834 }
7835 case TARGET_NR_flistxattr:
7836 {
7837 void *b = 0;
7838 if (arg2) {
7839 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
7840 if (!b) {
7841 ret = -TARGET_EFAULT;
7842 break;
7843 }
7844 }
7845 ret = get_errno(flistxattr(arg1, b, arg3));
7846 unlock_user(b, arg2, arg3);
7847 break;
7848 }
7849 case TARGET_NR_setxattr:
7850 case TARGET_NR_lsetxattr:
7851 {
7852 void *p, *n, *v = 0;
7853 if (arg3) {
7854 v = lock_user(VERIFY_READ, arg3, arg4, 1);
7855 if (!v) {
7856 ret = -TARGET_EFAULT;
7857 break;
7858 }
7859 }
7860 p = lock_user_string(arg1);
7861 n = lock_user_string(arg2);
7862 if (p && n) {
7863 if (num == TARGET_NR_setxattr) {
7864 ret = get_errno(setxattr(p, n, v, arg4, arg5));
7865 } else {
7866 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
7867 }
7868 } else {
7869 ret = -TARGET_EFAULT;
7870 }
7871 unlock_user(p, arg1, 0);
7872 unlock_user(n, arg2, 0);
7873 unlock_user(v, arg3, 0);
7874 }
7875 break;
7876 case TARGET_NR_fsetxattr:
7877 {
7878 void *n, *v = 0;
7879 if (arg3) {
7880 v = lock_user(VERIFY_READ, arg3, arg4, 1);
7881 if (!v) {
7882 ret = -TARGET_EFAULT;
7883 break;
7884 }
7885 }
7886 n = lock_user_string(arg2);
7887 if (n) {
7888 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
7889 } else {
7890 ret = -TARGET_EFAULT;
7891 }
7892 unlock_user(n, arg2, 0);
7893 unlock_user(v, arg3, 0);
7894 }
7895 break;
7896 case TARGET_NR_getxattr:
7897 case TARGET_NR_lgetxattr:
7898 {
7899 void *p, *n, *v = 0;
7900 if (arg3) {
7901 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7902 if (!v) {
7903 ret = -TARGET_EFAULT;
7904 break;
7905 }
7906 }
7907 p = lock_user_string(arg1);
7908 n = lock_user_string(arg2);
7909 if (p && n) {
7910 if (num == TARGET_NR_getxattr) {
7911 ret = get_errno(getxattr(p, n, v, arg4));
7912 } else {
7913 ret = get_errno(lgetxattr(p, n, v, arg4));
7914 }
7915 } else {
7916 ret = -TARGET_EFAULT;
7917 }
7918 unlock_user(p, arg1, 0);
7919 unlock_user(n, arg2, 0);
7920 unlock_user(v, arg3, arg4);
7921 }
7922 break;
7923 case TARGET_NR_fgetxattr:
7924 {
7925 void *n, *v = 0;
7926 if (arg3) {
7927 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7928 if (!v) {
7929 ret = -TARGET_EFAULT;
7930 break;
7931 }
7932 }
7933 n = lock_user_string(arg2);
7934 if (n) {
7935 ret = get_errno(fgetxattr(arg1, n, v, arg4));
7936 } else {
7937 ret = -TARGET_EFAULT;
7938 }
7939 unlock_user(n, arg2, 0);
7940 unlock_user(v, arg3, arg4);
7941 }
7942 break;
7943 case TARGET_NR_removexattr:
7944 case TARGET_NR_lremovexattr:
7945 {
7946 void *p, *n;
7947 p = lock_user_string(arg1);
7948 n = lock_user_string(arg2);
7949 if (p && n) {
7950 if (num == TARGET_NR_removexattr) {
7951 ret = get_errno(removexattr(p, n));
7952 } else {
7953 ret = get_errno(lremovexattr(p, n));
7954 }
7955 } else {
7956 ret = -TARGET_EFAULT;
7957 }
7958 unlock_user(p, arg1, 0);
7959 unlock_user(n, arg2, 0);
7960 }
7961 break;
7962 case TARGET_NR_fremovexattr:
7963 {
7964 void *n;
7965 n = lock_user_string(arg2);
7966 if (n) {
7967 ret = get_errno(fremovexattr(arg1, n));
7968 } else {
7969 ret = -TARGET_EFAULT;
7970 }
7971 unlock_user(n, arg2, 0);
7972 }
7973 break;
7974 #endif
7975 #endif /* CONFIG_ATTR */
7976 #ifdef TARGET_NR_set_thread_area
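/* set_thread_area is per-target: MIPS and CRIS just record the value in the CPU state,
   while 32-bit x86 goes through the emulated GDT in do_set_thread_area(). */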
7977 case TARGET_NR_set_thread_area:
7978 #if defined(TARGET_MIPS)
7979 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7980 ret = 0;
7981 break;
7982 #elif defined(TARGET_CRIS)
7983 if (arg1 & 0xff)
7984 ret = -TARGET_EINVAL;
7985 else {
7986 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7987 ret = 0;
7988 }
7989 break;
7990 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7991 ret = do_set_thread_area(cpu_env, arg1);
7992 break;
7993 #else
7994 goto unimplemented_nowarn;
7995 #endif
7996 #endif
7997 #ifdef TARGET_NR_get_thread_area
7998 case TARGET_NR_get_thread_area:
7999 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8000 ret = do_get_thread_area(cpu_env, arg1);
8001 #else
8002 goto unimplemented_nowarn;
8003 #endif
8004 #endif
8005 #ifdef TARGET_NR_getdomainname
8006 case TARGET_NR_getdomainname:
8007 goto unimplemented_nowarn;
8008 #endif
8009
8010 #ifdef TARGET_NR_clock_gettime
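/* clock_gettime/clock_getres: call the host and convert the resulting struct timespec
   to the target layout on success. */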
8011 case TARGET_NR_clock_gettime:
8012 {
8013 struct timespec ts;
8014 ret = get_errno(clock_gettime(arg1, &ts));
8015 if (!is_error(ret)) {
8016 host_to_target_timespec(arg2, &ts);
8017 }
8018 break;
8019 }
8020 #endif
8021 #ifdef TARGET_NR_clock_getres
8022 case TARGET_NR_clock_getres:
8023 {
8024 struct timespec ts;
8025 ret = get_errno(clock_getres(arg1, &ts));
8026 if (!is_error(ret)) {
8027 host_to_target_timespec(arg2, &ts);
8028 }
8029 break;
8030 }
8031 #endif
8032 #ifdef TARGET_NR_clock_nanosleep
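/* clock_nanosleep: convert the request to a host timespec; if the guest supplied a
   "remaining time" pointer, copy the (possibly updated) value back on return. */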
8033 case TARGET_NR_clock_nanosleep:
8034 {
8035 struct timespec ts;
8036 target_to_host_timespec(&ts, arg3);
8037 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8038 if (arg4)
8039 host_to_target_timespec(arg4, &ts);
8040 break;
8041 }
8042 #endif
8043
8044 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8045 case TARGET_NR_set_tid_address:
8046 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8047 break;
8048 #endif
8049
8050 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
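/* tkill/tgkill: translate the signal number from target to host numbering before delivery. */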
8051 case TARGET_NR_tkill:
8052 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8053 break;
8054 #endif
8055
8056 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8057 case TARGET_NR_tgkill:
8058 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8059 target_to_host_signal(arg3)));
8060 break;
8061 #endif
8062
8063 #ifdef TARGET_NR_set_robust_list
8064 case TARGET_NR_set_robust_list:
8065 goto unimplemented_nowarn;
8066 #endif
8067
8068 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
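/* utimensat: a NULL times pointer means "set both timestamps to now" and a NULL path
   operates on the dirfd itself, so both are forwarded as-is after converting any timespecs. */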
8069 case TARGET_NR_utimensat:
8070 {
8071 struct timespec *tsp, ts[2];
8072 if (!arg3) {
8073 tsp = NULL;
8074 } else {
8075 target_to_host_timespec(ts, arg3);
8076 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8077 tsp = ts;
8078 }
8079 if (!arg2)
8080 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8081 else {
8082 if (!(p = lock_user_string(arg2))) {
8083 ret = -TARGET_EFAULT;
8084 goto fail;
8085 }
8086 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8087 unlock_user(p, arg2, 0);
8088 }
8089 }
8090 break;
8091 #endif
8092 #if defined(CONFIG_USE_NPTL)
8093 case TARGET_NR_futex:
8094 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8095 break;
8096 #endif
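/* inotify: the watch masks are plain bit flags shared with the host, so init/add/rm
   are forwarded directly; only the pathname needs translating. */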
8097 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8098 case TARGET_NR_inotify_init:
8099 ret = get_errno(sys_inotify_init());
8100 break;
8101 #endif
8102 #ifdef CONFIG_INOTIFY1
8103 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8104 case TARGET_NR_inotify_init1:
8105 ret = get_errno(sys_inotify_init1(arg1));
8106 break;
8107 #endif
8108 #endif
8109 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8110 case TARGET_NR_inotify_add_watch:
8111 p = lock_user_string(arg2);
8112 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8113 unlock_user(p, arg2, 0);
8114 break;
8115 #endif
8116 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8117 case TARGET_NR_inotify_rm_watch:
8118 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8119 break;
8120 #endif
8121
8122 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
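/* POSIX message queues are emulated through the host mq_* library calls; mq_attr
   structures and timespecs are converted between target and host layouts at the boundary. */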
8123 case TARGET_NR_mq_open:
8124 {
8125 struct mq_attr posix_mq_attr;
8126
8127 p = lock_user_string(arg1 - 1);
8128 if (arg4 != 0)
8129 copy_from_user_mq_attr(&posix_mq_attr, arg4);
8130 ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL));
8131 unlock_user(p, arg1, 0);
8132 }
8133 break;
8134
8135 case TARGET_NR_mq_unlink:
8136 p = lock_user_string(arg1 - 1);
8137 ret = get_errno(mq_unlink(p));
8138 unlock_user (p, arg1, 0);
8139 break;
8140
8141 case TARGET_NR_mq_timedsend:
8142 {
8143 struct timespec ts;
8144
8145 p = lock_user(VERIFY_READ, arg2, arg3, 1);
8146 if (arg5 != 0) {
8147 target_to_host_timespec(&ts, arg5);
8148 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8149 host_to_target_timespec(arg5, &ts);
8150 }
8151 else
8152 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8153 unlock_user (p, arg2, arg3);
8154 }
8155 break;
8156
8157 case TARGET_NR_mq_timedreceive:
8158 {
8159 struct timespec ts;
8160 unsigned int prio;
8161
8162 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8163 if (arg5 != 0) {
8164 target_to_host_timespec(&ts, arg5);
8165 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8166 host_to_target_timespec(arg5, &ts);
8167 }
8168 else
8169 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8170 unlock_user (p, arg2, arg3);
8171 if (arg4 != 0)
8172 put_user_u32(prio, arg4);
8173 }
8174 break;
8175
8176 /* Not implemented for now... */
8177 /* case TARGET_NR_mq_notify: */
8178 /* break; */
8179
8180 case TARGET_NR_mq_getsetattr:
8181 {
8182 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8183 ret = 0;
8184 if (arg3 != 0) {
8185 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
8186 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8187 }
8188 if (arg2 != 0) {
8189 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8190 ret |= get_errno(mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out));
8191 }
8192
8193 }
8194 break;
8195 #endif
8196
8197 #ifdef CONFIG_SPLICE
8198 #ifdef TARGET_NR_tee
8199 case TARGET_NR_tee:
8200 {
8201 ret = get_errno(tee(arg1, arg2, arg3, arg4));
8202 }
8203 break;
8204 #endif
8205 #ifdef TARGET_NR_splice
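/* splice: the optional in/out offsets live in guest memory, so read them into host
   loff_t variables first (a NULL guest pointer means "use the fd's file offset"). */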
8206 case TARGET_NR_splice:
8207 {
8208 loff_t loff_in, loff_out;
8209 loff_t *ploff_in = NULL, *ploff_out = NULL;
8210 if (arg2) {
8211 get_user_u64(loff_in, arg2);
8212 ploff_in = &loff_in;
8213 }
8214 if (arg4) {
8215 get_user_u64(loff_out, arg4);
8216 ploff_out = &loff_out;
8217 }
8218 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8219 }
8220 break;
8221 #endif
8222 #ifdef TARGET_NR_vmsplice
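/* vmsplice: lock and convert the guest iovec array to host iovecs before handing it to the kernel. */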
8223 case TARGET_NR_vmsplice:
8224 {
8225 int count = arg3;
8226 struct iovec *vec;
8227
8228 vec = alloca(count * sizeof(struct iovec));
8229 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
8230 goto efault;
8231 ret = get_errno(vmsplice(arg1, vec, count, arg4));
8232 unlock_iovec(vec, arg2, count, 0);
8233 }
8234 break;
8235 #endif
8236 #endif /* CONFIG_SPLICE */
8237 #ifdef CONFIG_EVENTFD
8238 #if defined(TARGET_NR_eventfd)
8239 case TARGET_NR_eventfd:
8240 ret = get_errno(eventfd(arg1, 0));
8241 break;
8242 #endif
8243 #if defined(TARGET_NR_eventfd2)
8244 case TARGET_NR_eventfd2:
8245 ret = get_errno(eventfd(arg1, arg2));
8246 break;
8247 #endif
8248 #endif /* CONFIG_EVENTFD */
8249 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8250 case TARGET_NR_fallocate:
8251 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8252 break;
8253 #endif
8254 #if defined(CONFIG_SYNC_FILE_RANGE)
8255 #if defined(TARGET_NR_sync_file_range)
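/* sync_file_range: on 32-bit ABIs the two 64-bit offsets are passed as register pairs,
   and MIPS inserts a pad register first, hence the per-target argument selection below. */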
8256 case TARGET_NR_sync_file_range:
8257 #if TARGET_ABI_BITS == 32
8258 #if defined(TARGET_MIPS)
8259 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8260 target_offset64(arg5, arg6), arg7));
8261 #else
8262 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8263 target_offset64(arg4, arg5), arg6));
8264 #endif /* !TARGET_MIPS */
8265 #else
8266 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8267 #endif
8268 break;
8269 #endif
8270 #if defined(TARGET_NR_sync_file_range2)
8271 case TARGET_NR_sync_file_range2:
8272 /* This is like sync_file_range but the arguments are reordered */
8273 #if TARGET_ABI_BITS == 32
8274 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8275 target_offset64(arg5, arg6), arg2));
8276 #else
8277 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8278 #endif
8279 break;
8280 #endif
8281 #endif
8282 #if defined(CONFIG_EPOLL)
8283 #if defined(TARGET_NR_epoll_create)
8284 case TARGET_NR_epoll_create:
8285 ret = get_errno(epoll_create(arg1));
8286 break;
8287 #endif
8288 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8289 case TARGET_NR_epoll_create1:
8290 ret = get_errno(epoll_create1(arg1));
8291 break;
8292 #endif
8293 #if defined(TARGET_NR_epoll_ctl)
8294 case TARGET_NR_epoll_ctl:
8295 {
8296 struct epoll_event ep;
8297 struct epoll_event *epp = 0;
8298 if (arg4) {
8299 struct target_epoll_event *target_ep;
8300 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8301 goto efault;
8302 }
8303 ep.events = tswap32(target_ep->events);
8304 /* The epoll_data_t union is just opaque data to the kernel,
8305 * so we transfer all 64 bits across and need not worry what
8306 * actual data type it is.
8307 */
8308 ep.data.u64 = tswap64(target_ep->data.u64);
8309 unlock_user_struct(target_ep, arg4, 0);
8310 epp = &ep;
8311 }
8312 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8313 break;
8314 }
8315 #endif
8316
8317 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8318 #define IMPLEMENT_EPOLL_PWAIT
8319 #endif
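/* epoll_wait and epoll_pwait share one implementation: events are collected into a host
   array and byte-swapped into the guest's target_epoll_event buffer on return. */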
8320 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8321 #if defined(TARGET_NR_epoll_wait)
8322 case TARGET_NR_epoll_wait:
8323 #endif
8324 #if defined(IMPLEMENT_EPOLL_PWAIT)
8325 case TARGET_NR_epoll_pwait:
8326 #endif
8327 {
8328 struct target_epoll_event *target_ep;
8329 struct epoll_event *ep;
8330 int epfd = arg1;
8331 int maxevents = arg3;
8332 int timeout = arg4;
8333
8334 target_ep = lock_user(VERIFY_WRITE, arg2,
8335 maxevents * sizeof(struct target_epoll_event), 1);
8336 if (!target_ep) {
8337 goto efault;
8338 }
8339
8340 ep = alloca(maxevents * sizeof(struct epoll_event));
8341
8342 switch (num) {
8343 #if defined(IMPLEMENT_EPOLL_PWAIT)
8344 case TARGET_NR_epoll_pwait:
8345 {
8346 target_sigset_t *target_set;
8347 sigset_t _set, *set = &_set;
8348
8349 if (arg5) {
8350 target_set = lock_user(VERIFY_READ, arg5,
8351 sizeof(target_sigset_t), 1);
8352 if (!target_set) {
8353 unlock_user(target_ep, arg2, 0);
8354 goto efault;
8355 }
8356 target_to_host_sigset(set, target_set);
8357 unlock_user(target_set, arg5, 0);
8358 } else {
8359 set = NULL;
8360 }
8361
8362 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8363 break;
8364 }
8365 #endif
8366 #if defined(TARGET_NR_epoll_wait)
8367 case TARGET_NR_epoll_wait:
8368 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8369 break;
8370 #endif
8371 default:
8372 ret = -TARGET_ENOSYS;
8373 }
8374 if (!is_error(ret)) {
8375 int i;
8376 for (i = 0; i < ret; i++) {
8377 target_ep[i].events = tswap32(ep[i].events);
8378 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8379 }
8380 }
8381 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8382 break;
8383 }
8384 #endif
8385 #endif
8386 #ifdef TARGET_NR_prlimit64
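/* prlimit64 always uses 64-bit rlimit values, so only byte order needs converting
   between the target and host rlimit64 structures. */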
8387 case TARGET_NR_prlimit64:
8388 {
8389 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8390 struct target_rlimit64 *target_rnew, *target_rold;
8391 struct host_rlimit64 rnew, rold, *rnewp = 0;
8392 if (arg3) {
8393 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8394 goto efault;
8395 }
8396 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8397 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8398 unlock_user_struct(target_rnew, arg3, 0);
8399 rnewp = &rnew;
8400 }
8401
8402 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8403 if (!is_error(ret) && arg4) {
8404 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8405 goto efault;
8406 }
8407 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8408 target_rold->rlim_max = tswap64(rold.rlim_max);
8409 unlock_user_struct(target_rold, arg4, 1);
8410 }
8411 break;
8412 }
8413 #endif
8414 default:
8415 unimplemented:
8416 gemu_log("qemu: Unsupported syscall: %d\n", num);
8417 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8418 unimplemented_nowarn:
8419 #endif
8420 ret = -TARGET_ENOSYS;
8421 break;
8422 }
8423 fail:
8424 #ifdef DEBUG
8425 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8426 #endif
8427 if(do_strace)
8428 print_syscall_ret(num, ret);
8429 return ret;
8430 efault:
8431 ret = -TARGET_EFAULT;
8432 goto fail;
8433 }