git.proxmox.com Git - qemu.git/blob - linux-user/syscall.c
151f4f32726c62f65b1fb69e8b1cb33f0a6b85d7
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81
82 #define termios host_termios
83 #define winsize host_winsize
84 #define termio host_termio
85 #define sgttyb host_sgttyb /* same as target */
86 #define tchars host_tchars /* same as target */
87 #define ltchars host_ltchars /* same as target */
88
89 #include <linux/termios.h>
90 #include <linux/unistd.h>
91 #include <linux/utsname.h>
92 #include <linux/cdrom.h>
93 #include <linux/hdreg.h>
94 #include <linux/soundcard.h>
95 #include <linux/kd.h>
96 #include <linux/mtio.h>
97 #include <linux/fs.h>
98 #if defined(CONFIG_FIEMAP)
99 #include <linux/fiemap.h>
100 #endif
101 #include <linux/fb.h>
102 #include <linux/vt.h>
103 #include <linux/dm-ioctl.h>
104 #include "linux_loop.h"
105 #include "cpu-uname.h"
106
107 #include "qemu.h"
108
109 #if defined(CONFIG_USE_NPTL)
110 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
111 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
112 #else
113 /* XXX: Hardcode the above values. */
114 #define CLONE_NPTL_FLAGS2 0
115 #endif
116
117 //#define DEBUG
118
119 //#include <linux/msdos_fs.h>
120 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
121 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
122
123
124 #undef _syscall0
125 #undef _syscall1
126 #undef _syscall2
127 #undef _syscall3
128 #undef _syscall4
129 #undef _syscall5
130 #undef _syscall6
131
132 #define _syscall0(type,name) \
133 static type name (void) \
134 { \
135 return syscall(__NR_##name); \
136 }
137
138 #define _syscall1(type,name,type1,arg1) \
139 static type name (type1 arg1) \
140 { \
141 return syscall(__NR_##name, arg1); \
142 }
143
144 #define _syscall2(type,name,type1,arg1,type2,arg2) \
145 static type name (type1 arg1,type2 arg2) \
146 { \
147 return syscall(__NR_##name, arg1, arg2); \
148 }
149
150 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
151 static type name (type1 arg1,type2 arg2,type3 arg3) \
152 { \
153 return syscall(__NR_##name, arg1, arg2, arg3); \
154 }
155
156 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
157 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
158 { \
159 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
160 }
161
162 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
165 { \
166 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
167 }
168
169
170 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
171 type5,arg5,type6,arg6) \
172 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
173 type6 arg6) \
174 { \
175 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
176 }
177
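/*
 * For reference, a worked expansion (illustrative only): an invocation such
 * as the later
 *     _syscall2(int, sys_tkill, int, tid, int, sig)
 * produces a thin static wrapper around the host syscall(2):
 *     static int sys_tkill(int tid, int sig)
 *     {
 *         return syscall(__NR_sys_tkill, tid, sig);
 *     }
 * so each wrapper returns the raw host result and leaves the host errno in
 * errno; callers convert that with get_errno() further down in this file.
 */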
178
179 #define __NR_sys_uname __NR_uname
180 #define __NR_sys_faccessat __NR_faccessat
181 #define __NR_sys_fchmodat __NR_fchmodat
182 #define __NR_sys_fchownat __NR_fchownat
183 #define __NR_sys_fstatat64 __NR_fstatat64
184 #define __NR_sys_futimesat __NR_futimesat
185 #define __NR_sys_getcwd1 __NR_getcwd
186 #define __NR_sys_getdents __NR_getdents
187 #define __NR_sys_getdents64 __NR_getdents64
188 #define __NR_sys_getpriority __NR_getpriority
189 #define __NR_sys_linkat __NR_linkat
190 #define __NR_sys_mkdirat __NR_mkdirat
191 #define __NR_sys_mknodat __NR_mknodat
192 #define __NR_sys_newfstatat __NR_newfstatat
193 #define __NR_sys_openat __NR_openat
194 #define __NR_sys_readlinkat __NR_readlinkat
195 #define __NR_sys_renameat __NR_renameat
196 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
197 #define __NR_sys_symlinkat __NR_symlinkat
198 #define __NR_sys_syslog __NR_syslog
199 #define __NR_sys_tgkill __NR_tgkill
200 #define __NR_sys_tkill __NR_tkill
201 #define __NR_sys_unlinkat __NR_unlinkat
202 #define __NR_sys_utimensat __NR_utimensat
203 #define __NR_sys_futex __NR_futex
204 #define __NR_sys_inotify_init __NR_inotify_init
205 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
206 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
207
208 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
209 defined(__s390x__)
210 #define __NR__llseek __NR_lseek
211 #endif
212
213 #ifdef __NR_gettid
214 _syscall0(int, gettid)
215 #else
216 /* This is a replacement for the host gettid() and must return a host
217 errno. */
218 static int gettid(void) {
219 return -ENOSYS;
220 }
221 #endif
222 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
223 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
224 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
225 #endif
226 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
227 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
228 loff_t *, res, uint, wh);
229 #endif
230 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
231 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
232 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
233 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
234 #endif
235 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
236 _syscall2(int,sys_tkill,int,tid,int,sig)
237 #endif
238 #ifdef __NR_exit_group
239 _syscall1(int,exit_group,int,error_code)
240 #endif
241 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
242 _syscall1(int,set_tid_address,int *,tidptr)
243 #endif
244 #if defined(CONFIG_USE_NPTL)
245 #if defined(TARGET_NR_futex) && defined(__NR_futex)
246 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
247 const struct timespec *,timeout,int *,uaddr2,int,val3)
248 #endif
249 #endif
250 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
251 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
252 unsigned long *, user_mask_ptr);
253 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
254 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
255 unsigned long *, user_mask_ptr);
256 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
257 void *, arg);
258
259 static bitmask_transtbl fcntl_flags_tbl[] = {
260 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
261 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
262 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
263 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
264 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
265 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
266 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
267 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
268 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
269 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
270 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
271 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
272 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
275 #endif
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
278 #endif
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
281 #endif
282 #if defined(O_PATH)
283 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
284 #endif
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
288 #endif
289 { 0, 0, 0, 0 }
290 };
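/*
 * Rough sketch of how the table above is consumed (not a restatement of the
 * exact helper code): each row pairs (target_mask, target_match) with the
 * equivalent (host_mask, host_match).  When (target_flags & target_mask) ==
 * target_match, the host match bits are OR'd into the converted value, and
 * the reverse direction works the same way.  The target_to_host_bitmask()/
 * host_to_target_bitmask() helpers used elsewhere in this file walk this
 * table to translate open()/fcntl() flags.
 */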
291
292 #define COPY_UTSNAME_FIELD(dest, src) \
293 do { \
294 /* __NEW_UTS_LEN doesn't include terminating null */ \
295 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
296 (dest)[__NEW_UTS_LEN] = '\0'; \
297 } while (0)
298
299 static int sys_uname(struct new_utsname *buf)
300 {
301 struct utsname uts_buf;
302
303 if (uname(&uts_buf) < 0)
304 return (-1);
305
306 /*
307 * Just in case these have some differences, we
308 * translate utsname to new_utsname (which is the
309  * struct the Linux kernel uses).
310 */
311
312 memset(buf, 0, sizeof(*buf));
313 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
314 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
315 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
316 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
317 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
318 #ifdef _GNU_SOURCE
319 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
320 #endif
321 return (0);
322
323 #undef COPY_UTSNAME_FIELD
324 }
325
326 static int sys_getcwd1(char *buf, size_t size)
327 {
328 if (getcwd(buf, size) == NULL) {
329 /* getcwd() sets errno */
330 return (-1);
331 }
332 return strlen(buf)+1;
333 }
334
335 #ifdef CONFIG_ATFILE
336 /*
337  * The host C library provides the atfile (*at) syscall stubs; enable
338  * them one by one, guarded by the target's TARGET_NR_* definitions.
339 */
340
341 #ifdef TARGET_NR_faccessat
342 static int sys_faccessat(int dirfd, const char *pathname, int mode)
343 {
344 return (faccessat(dirfd, pathname, mode, 0));
345 }
346 #endif
347 #ifdef TARGET_NR_fchmodat
348 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
349 {
350 return (fchmodat(dirfd, pathname, mode, 0));
351 }
352 #endif
353 #if defined(TARGET_NR_fchownat)
354 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
355 gid_t group, int flags)
356 {
357 return (fchownat(dirfd, pathname, owner, group, flags));
358 }
359 #endif
360 #ifdef __NR_fstatat64
361 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
362 int flags)
363 {
364 return (fstatat(dirfd, pathname, buf, flags));
365 }
366 #endif
367 #ifdef __NR_newfstatat
368 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
369 int flags)
370 {
371 return (fstatat(dirfd, pathname, buf, flags));
372 }
373 #endif
374 #ifdef TARGET_NR_futimesat
375 static int sys_futimesat(int dirfd, const char *pathname,
376 const struct timeval times[2])
377 {
378 return (futimesat(dirfd, pathname, times));
379 }
380 #endif
381 #ifdef TARGET_NR_linkat
382 static int sys_linkat(int olddirfd, const char *oldpath,
383 int newdirfd, const char *newpath, int flags)
384 {
385 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
386 }
387 #endif
388 #ifdef TARGET_NR_mkdirat
389 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
390 {
391 return (mkdirat(dirfd, pathname, mode));
392 }
393 #endif
394 #ifdef TARGET_NR_mknodat
395 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
396 dev_t dev)
397 {
398 return (mknodat(dirfd, pathname, mode, dev));
399 }
400 #endif
401 #ifdef TARGET_NR_openat
402 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
403 {
404 /*
405 * open(2) has extra parameter 'mode' when called with
406 * flag O_CREAT.
407 */
408 if ((flags & O_CREAT) != 0) {
409 return (openat(dirfd, pathname, flags, mode));
410 }
411 return (openat(dirfd, pathname, flags));
412 }
413 #endif
414 #ifdef TARGET_NR_readlinkat
415 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
416 {
417 return (readlinkat(dirfd, pathname, buf, bufsiz));
418 }
419 #endif
420 #ifdef TARGET_NR_renameat
421 static int sys_renameat(int olddirfd, const char *oldpath,
422 int newdirfd, const char *newpath)
423 {
424 return (renameat(olddirfd, oldpath, newdirfd, newpath));
425 }
426 #endif
427 #ifdef TARGET_NR_symlinkat
428 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
429 {
430 return (symlinkat(oldpath, newdirfd, newpath));
431 }
432 #endif
433 #ifdef TARGET_NR_unlinkat
434 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
435 {
436 return (unlinkat(dirfd, pathname, flags));
437 }
438 #endif
439 #else /* !CONFIG_ATFILE */
440
441 /*
442 * Try direct syscalls instead
443 */
444 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
445 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
446 #endif
447 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
448 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
449 #endif
450 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
451 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
452 uid_t,owner,gid_t,group,int,flags)
453 #endif
454 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
455 defined(__NR_fstatat64)
456 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
457 struct stat *,buf,int,flags)
458 #endif
459 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
460 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
461 const struct timeval *,times)
462 #endif
463 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
464 defined(__NR_newfstatat)
465 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
466 struct stat *,buf,int,flags)
467 #endif
468 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
469 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
470 int,newdirfd,const char *,newpath,int,flags)
471 #endif
472 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
473 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
474 #endif
475 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
476 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
477 mode_t,mode,dev_t,dev)
478 #endif
479 #if defined(TARGET_NR_openat) && defined(__NR_openat)
480 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
481 #endif
482 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
483 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
484 char *,buf,size_t,bufsize)
485 #endif
486 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
487 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
488 int,newdirfd,const char *,newpath)
489 #endif
490 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
491 _syscall3(int,sys_symlinkat,const char *,oldpath,
492 int,newdirfd,const char *,newpath)
493 #endif
494 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
495 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
496 #endif
497
498 #endif /* CONFIG_ATFILE */
499
500 #ifdef CONFIG_UTIMENSAT
501 static int sys_utimensat(int dirfd, const char *pathname,
502 const struct timespec times[2], int flags)
503 {
504 if (pathname == NULL)
505 return futimens(dirfd, times);
506 else
507 return utimensat(dirfd, pathname, times, flags);
508 }
509 #else
510 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
511 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
512 const struct timespec *,tsp,int,flags)
513 #endif
514 #endif /* CONFIG_UTIMENSAT */
515
516 #ifdef CONFIG_INOTIFY
517 #include <sys/inotify.h>
518
519 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
520 static int sys_inotify_init(void)
521 {
522 return (inotify_init());
523 }
524 #endif
525 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
526 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
527 {
528 return (inotify_add_watch(fd, pathname, mask));
529 }
530 #endif
531 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
532 static int sys_inotify_rm_watch(int fd, int32_t wd)
533 {
534 return (inotify_rm_watch(fd, wd));
535 }
536 #endif
537 #ifdef CONFIG_INOTIFY1
538 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
539 static int sys_inotify_init1(int flags)
540 {
541 return (inotify_init1(flags));
542 }
543 #endif
544 #endif
545 #else
546 /* Userspace can usually survive runtime without inotify */
547 #undef TARGET_NR_inotify_init
548 #undef TARGET_NR_inotify_init1
549 #undef TARGET_NR_inotify_add_watch
550 #undef TARGET_NR_inotify_rm_watch
551 #endif /* CONFIG_INOTIFY */
552
553 #if defined(TARGET_NR_ppoll)
554 #ifndef __NR_ppoll
555 # define __NR_ppoll -1
556 #endif
557 #define __NR_sys_ppoll __NR_ppoll
558 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
559 struct timespec *, timeout, const __sigset_t *, sigmask,
560 size_t, sigsetsize)
561 #endif
562
563 #if defined(TARGET_NR_pselect6)
564 #ifndef __NR_pselect6
565 # define __NR_pselect6 -1
566 #endif
567 #define __NR_sys_pselect6 __NR_pselect6
568 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
569 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
570 #endif
571
572 #if defined(TARGET_NR_prlimit64)
573 #ifndef __NR_prlimit64
574 # define __NR_prlimit64 -1
575 #endif
576 #define __NR_sys_prlimit64 __NR_prlimit64
577 /* The glibc rlimit structure may not be that used by the underlying syscall */
578 struct host_rlimit64 {
579 uint64_t rlim_cur;
580 uint64_t rlim_max;
581 };
582 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
583 const struct host_rlimit64 *, new_limit,
584 struct host_rlimit64 *, old_limit)
585 #endif
586
587 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
588 #ifdef TARGET_ARM
589 static inline int regpairs_aligned(void *cpu_env) {
590 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
591 }
592 #elif defined(TARGET_MIPS)
593 static inline int regpairs_aligned(void *cpu_env) { return 1; }
594 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
595 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
596 * of registers which translates to the same as ARM/MIPS, because we start with
597 * r3 as arg1 */
598 static inline int regpairs_aligned(void *cpu_env) { return 1; }
599 #else
600 static inline int regpairs_aligned(void *cpu_env) { return 0; }
601 #endif
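/*
 * Illustration (not tied to any one syscall): on 32-bit guests a 64-bit
 * argument arrives as two abi_longs, and when regpairs_aligned() reports 1
 * the pair must start on an even register, so the syscall handlers later in
 * this file skip one argument slot before recombining the halves, along the
 * lines of:
 *     if (regpairs_aligned(cpu_env)) { arg4 = arg5; arg5 = arg6; }
 *     uint64_t off = ((uint64_t)hi_word << 32) | lo_word;  // half order per target ABI
 * (hi_word/lo_word are placeholder names for the two argument registers).
 */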
602
603 #define ERRNO_TABLE_SIZE 1200
604
605 /* target_to_host_errno_table[] is initialized from
606 * host_to_target_errno_table[] in syscall_init(). */
607 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
608 };
609
610 /*
611 * This list is the union of errno values overridden in asm-<arch>/errno.h
612 * minus the errnos that are not actually generic to all archs.
613 */
614 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
615 [EIDRM] = TARGET_EIDRM,
616 [ECHRNG] = TARGET_ECHRNG,
617 [EL2NSYNC] = TARGET_EL2NSYNC,
618 [EL3HLT] = TARGET_EL3HLT,
619 [EL3RST] = TARGET_EL3RST,
620 [ELNRNG] = TARGET_ELNRNG,
621 [EUNATCH] = TARGET_EUNATCH,
622 [ENOCSI] = TARGET_ENOCSI,
623 [EL2HLT] = TARGET_EL2HLT,
624 [EDEADLK] = TARGET_EDEADLK,
625 [ENOLCK] = TARGET_ENOLCK,
626 [EBADE] = TARGET_EBADE,
627 [EBADR] = TARGET_EBADR,
628 [EXFULL] = TARGET_EXFULL,
629 [ENOANO] = TARGET_ENOANO,
630 [EBADRQC] = TARGET_EBADRQC,
631 [EBADSLT] = TARGET_EBADSLT,
632 [EBFONT] = TARGET_EBFONT,
633 [ENOSTR] = TARGET_ENOSTR,
634 [ENODATA] = TARGET_ENODATA,
635 [ETIME] = TARGET_ETIME,
636 [ENOSR] = TARGET_ENOSR,
637 [ENONET] = TARGET_ENONET,
638 [ENOPKG] = TARGET_ENOPKG,
639 [EREMOTE] = TARGET_EREMOTE,
640 [ENOLINK] = TARGET_ENOLINK,
641 [EADV] = TARGET_EADV,
642 [ESRMNT] = TARGET_ESRMNT,
643 [ECOMM] = TARGET_ECOMM,
644 [EPROTO] = TARGET_EPROTO,
645 [EDOTDOT] = TARGET_EDOTDOT,
646 [EMULTIHOP] = TARGET_EMULTIHOP,
647 [EBADMSG] = TARGET_EBADMSG,
648 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
649 [EOVERFLOW] = TARGET_EOVERFLOW,
650 [ENOTUNIQ] = TARGET_ENOTUNIQ,
651 [EBADFD] = TARGET_EBADFD,
652 [EREMCHG] = TARGET_EREMCHG,
653 [ELIBACC] = TARGET_ELIBACC,
654 [ELIBBAD] = TARGET_ELIBBAD,
655 [ELIBSCN] = TARGET_ELIBSCN,
656 [ELIBMAX] = TARGET_ELIBMAX,
657 [ELIBEXEC] = TARGET_ELIBEXEC,
658 [EILSEQ] = TARGET_EILSEQ,
659 [ENOSYS] = TARGET_ENOSYS,
660 [ELOOP] = TARGET_ELOOP,
661 [ERESTART] = TARGET_ERESTART,
662 [ESTRPIPE] = TARGET_ESTRPIPE,
663 [ENOTEMPTY] = TARGET_ENOTEMPTY,
664 [EUSERS] = TARGET_EUSERS,
665 [ENOTSOCK] = TARGET_ENOTSOCK,
666 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
667 [EMSGSIZE] = TARGET_EMSGSIZE,
668 [EPROTOTYPE] = TARGET_EPROTOTYPE,
669 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
670 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
671 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
672 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
673 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
674 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
675 [EADDRINUSE] = TARGET_EADDRINUSE,
676 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
677 [ENETDOWN] = TARGET_ENETDOWN,
678 [ENETUNREACH] = TARGET_ENETUNREACH,
679 [ENETRESET] = TARGET_ENETRESET,
680 [ECONNABORTED] = TARGET_ECONNABORTED,
681 [ECONNRESET] = TARGET_ECONNRESET,
682 [ENOBUFS] = TARGET_ENOBUFS,
683 [EISCONN] = TARGET_EISCONN,
684 [ENOTCONN] = TARGET_ENOTCONN,
685 [EUCLEAN] = TARGET_EUCLEAN,
686 [ENOTNAM] = TARGET_ENOTNAM,
687 [ENAVAIL] = TARGET_ENAVAIL,
688 [EISNAM] = TARGET_EISNAM,
689 [EREMOTEIO] = TARGET_EREMOTEIO,
690 [ESHUTDOWN] = TARGET_ESHUTDOWN,
691 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
692 [ETIMEDOUT] = TARGET_ETIMEDOUT,
693 [ECONNREFUSED] = TARGET_ECONNREFUSED,
694 [EHOSTDOWN] = TARGET_EHOSTDOWN,
695 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
696 [EALREADY] = TARGET_EALREADY,
697 [EINPROGRESS] = TARGET_EINPROGRESS,
698 [ESTALE] = TARGET_ESTALE,
699 [ECANCELED] = TARGET_ECANCELED,
700 [ENOMEDIUM] = TARGET_ENOMEDIUM,
701 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
702 #ifdef ENOKEY
703 [ENOKEY] = TARGET_ENOKEY,
704 #endif
705 #ifdef EKEYEXPIRED
706 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
707 #endif
708 #ifdef EKEYREVOKED
709 [EKEYREVOKED] = TARGET_EKEYREVOKED,
710 #endif
711 #ifdef EKEYREJECTED
712 [EKEYREJECTED] = TARGET_EKEYREJECTED,
713 #endif
714 #ifdef EOWNERDEAD
715 [EOWNERDEAD] = TARGET_EOWNERDEAD,
716 #endif
717 #ifdef ENOTRECOVERABLE
718 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
719 #endif
720 };
721
722 static inline int host_to_target_errno(int err)
723 {
724 if(host_to_target_errno_table[err])
725 return host_to_target_errno_table[err];
726 return err;
727 }
728
729 static inline int target_to_host_errno(int err)
730 {
731 if (target_to_host_errno_table[err])
732 return target_to_host_errno_table[err];
733 return err;
734 }
735
736 static inline abi_long get_errno(abi_long ret)
737 {
738 if (ret == -1)
739 return -host_to_target_errno(errno);
740 else
741 return ret;
742 }
743
744 static inline int is_error(abi_long ret)
745 {
746 return (abi_ulong)ret >= (abi_ulong)(-4096);
747 }
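/*
 * Typical flow for the helpers above (illustrative):
 *     ret = get_errno(open(pathname, flags));
 * yields either a non-negative result or -TARGET_Exxx on failure, and
 * is_error() then recognises the failure because the value compares at or
 * above (abi_ulong)-4096, the window this file reserves for errno returns.
 */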
748
749 char *target_strerror(int err)
750 {
751 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
752 return NULL;
753 }
754 return strerror(target_to_host_errno(err));
755 }
756
757 static abi_ulong target_brk;
758 static abi_ulong target_original_brk;
759 static abi_ulong brk_page;
760
761 void target_set_brk(abi_ulong new_brk)
762 {
763 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
764 brk_page = HOST_PAGE_ALIGN(target_brk);
765 }
766
767 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
768 #define DEBUGF_BRK(message, args...)
769
770 /* do_brk() must return target values and target errnos. */
771 abi_long do_brk(abi_ulong new_brk)
772 {
773 abi_long mapped_addr;
774 int new_alloc_size;
775
776 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
777
778 if (!new_brk) {
779 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
780 return target_brk;
781 }
782 if (new_brk < target_original_brk) {
783 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
784 target_brk);
785 return target_brk;
786 }
787
788 /* If the new brk is less than the highest page reserved to the
789 * target heap allocation, set it and we're almost done... */
790 if (new_brk <= brk_page) {
791 /* Heap contents are initialized to zero, as for anonymous
792 * mapped pages. */
793 if (new_brk > target_brk) {
794 memset(g2h(target_brk), 0, new_brk - target_brk);
795 }
796 target_brk = new_brk;
797 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
798 return target_brk;
799 }
800
801 /* We need to allocate more memory after the brk... Note that
802 * we don't use MAP_FIXED because that will map over the top of
803 * any existing mapping (like the one with the host libc or qemu
804 * itself); instead we treat "mapped but at wrong address" as
805 * a failure and unmap again.
806 */
807 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
808 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
809 PROT_READ|PROT_WRITE,
810 MAP_ANON|MAP_PRIVATE, 0, 0));
811
812 if (mapped_addr == brk_page) {
813 /* Heap contents are initialized to zero, as for anonymous
814 * mapped pages. Technically the new pages are already
815 * initialized to zero since they *are* anonymous mapped
816 * pages, however we have to take care with the contents that
817 * come from the remaining part of the previous page: it may
818 * contains garbage data due to a previous heap usage (grown
819 * then shrunken). */
820 memset(g2h(target_brk), 0, brk_page - target_brk);
821
822 target_brk = new_brk;
823 brk_page = HOST_PAGE_ALIGN(target_brk);
824 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
825 target_brk);
826 return target_brk;
827 } else if (mapped_addr != -1) {
828 /* Mapped but at wrong address, meaning there wasn't actually
829 * enough space for this brk.
830 */
831 target_munmap(mapped_addr, new_alloc_size);
832 mapped_addr = -1;
833 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
834 }
835 else {
836 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
837 }
838
839 #if defined(TARGET_ALPHA)
840 /* We (partially) emulate OSF/1 on Alpha, which requires we
841 return a proper errno, not an unchanged brk value. */
842 return -TARGET_ENOMEM;
843 #endif
844 /* For everything else, return the previous break. */
845 return target_brk;
846 }
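/* Invariant maintained above, for reference: target_original_brk <=
 * target_brk <= brk_page, where brk_page is always host-page aligned and
 * marks the end of memory actually reserved for the guest heap.  Growing
 * past brk_page takes the target_mmap() path; shrinking only lowers
 * target_brk and never unmaps anything.
 */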
847
848 static inline abi_long copy_from_user_fdset(fd_set *fds,
849 abi_ulong target_fds_addr,
850 int n)
851 {
852 int i, nw, j, k;
853 abi_ulong b, *target_fds;
854
855 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
856 if (!(target_fds = lock_user(VERIFY_READ,
857 target_fds_addr,
858 sizeof(abi_ulong) * nw,
859 1)))
860 return -TARGET_EFAULT;
861
862 FD_ZERO(fds);
863 k = 0;
864 for (i = 0; i < nw; i++) {
865 /* grab the abi_ulong */
866 __get_user(b, &target_fds[i]);
867 for (j = 0; j < TARGET_ABI_BITS; j++) {
868 /* check the bit inside the abi_ulong */
869 if ((b >> j) & 1)
870 FD_SET(k, fds);
871 k++;
872 }
873 }
874
875 unlock_user(target_fds, target_fds_addr, 0);
876
877 return 0;
878 }
879
880 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
881 abi_ulong target_fds_addr,
882 int n)
883 {
884 if (target_fds_addr) {
885 if (copy_from_user_fdset(fds, target_fds_addr, n))
886 return -TARGET_EFAULT;
887 *fds_ptr = fds;
888 } else {
889 *fds_ptr = NULL;
890 }
891 return 0;
892 }
893
894 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
895 const fd_set *fds,
896 int n)
897 {
898 int i, nw, j, k;
899 abi_long v;
900 abi_ulong *target_fds;
901
902 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
903 if (!(target_fds = lock_user(VERIFY_WRITE,
904 target_fds_addr,
905 sizeof(abi_ulong) * nw,
906 0)))
907 return -TARGET_EFAULT;
908
909 k = 0;
910 for (i = 0; i < nw; i++) {
911 v = 0;
912 for (j = 0; j < TARGET_ABI_BITS; j++) {
913             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
914 k++;
915 }
916 __put_user(v, &target_fds[i]);
917 }
918
919 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
920
921 return 0;
922 }
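/* Layout used by the two fdset helpers above: guest fd_sets are arrays of
 * abi_ulong words, so descriptor k lives in word k / TARGET_ABI_BITS at bit
 * position k % TARGET_ABI_BITS, which is why both directions walk
 * nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS words bit by bit.
 */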
923
924 #if defined(__alpha__)
925 #define HOST_HZ 1024
926 #else
927 #define HOST_HZ 100
928 #endif
929
930 static inline abi_long host_to_target_clock_t(long ticks)
931 {
932 #if HOST_HZ == TARGET_HZ
933 return ticks;
934 #else
935 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
936 #endif
937 }
938
939 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
940 const struct rusage *rusage)
941 {
942 struct target_rusage *target_rusage;
943
944 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
945 return -TARGET_EFAULT;
946 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
947 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
948 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
949 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
950 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
951 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
952 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
953 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
954 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
955 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
956 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
957 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
958 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
959 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
960 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
961 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
962 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
963 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
964 unlock_user_struct(target_rusage, target_addr, 1);
965
966 return 0;
967 }
968
969 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
970 {
971 abi_ulong target_rlim_swap;
972 rlim_t result;
973
974 target_rlim_swap = tswapal(target_rlim);
975 if (target_rlim_swap == TARGET_RLIM_INFINITY)
976 return RLIM_INFINITY;
977
978 result = target_rlim_swap;
979 if (target_rlim_swap != (rlim_t)result)
980 return RLIM_INFINITY;
981
982 return result;
983 }
984
985 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
986 {
987 abi_ulong target_rlim_swap;
988 abi_ulong result;
989
990 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
991 target_rlim_swap = TARGET_RLIM_INFINITY;
992 else
993 target_rlim_swap = rlim;
994 result = tswapal(target_rlim_swap);
995
996 return result;
997 }
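/* Worked example for the rlimit conversions above: with a 32-bit guest on a
 * 64-bit host, a host limit of, say, 8 GiB does not fit in abi_long, so
 * host_to_target_rlim() reports it as TARGET_RLIM_INFINITY; in the other
 * direction TARGET_RLIM_INFINITY, and any value that would overflow rlim_t,
 * becomes RLIM_INFINITY.
 */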
998
999 static inline int target_to_host_resource(int code)
1000 {
1001 switch (code) {
1002 case TARGET_RLIMIT_AS:
1003 return RLIMIT_AS;
1004 case TARGET_RLIMIT_CORE:
1005 return RLIMIT_CORE;
1006 case TARGET_RLIMIT_CPU:
1007 return RLIMIT_CPU;
1008 case TARGET_RLIMIT_DATA:
1009 return RLIMIT_DATA;
1010 case TARGET_RLIMIT_FSIZE:
1011 return RLIMIT_FSIZE;
1012 case TARGET_RLIMIT_LOCKS:
1013 return RLIMIT_LOCKS;
1014 case TARGET_RLIMIT_MEMLOCK:
1015 return RLIMIT_MEMLOCK;
1016 case TARGET_RLIMIT_MSGQUEUE:
1017 return RLIMIT_MSGQUEUE;
1018 case TARGET_RLIMIT_NICE:
1019 return RLIMIT_NICE;
1020 case TARGET_RLIMIT_NOFILE:
1021 return RLIMIT_NOFILE;
1022 case TARGET_RLIMIT_NPROC:
1023 return RLIMIT_NPROC;
1024 case TARGET_RLIMIT_RSS:
1025 return RLIMIT_RSS;
1026 case TARGET_RLIMIT_RTPRIO:
1027 return RLIMIT_RTPRIO;
1028 case TARGET_RLIMIT_SIGPENDING:
1029 return RLIMIT_SIGPENDING;
1030 case TARGET_RLIMIT_STACK:
1031 return RLIMIT_STACK;
1032 default:
1033 return code;
1034 }
1035 }
1036
1037 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1038 abi_ulong target_tv_addr)
1039 {
1040 struct target_timeval *target_tv;
1041
1042 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1043 return -TARGET_EFAULT;
1044
1045 __get_user(tv->tv_sec, &target_tv->tv_sec);
1046 __get_user(tv->tv_usec, &target_tv->tv_usec);
1047
1048 unlock_user_struct(target_tv, target_tv_addr, 0);
1049
1050 return 0;
1051 }
1052
1053 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1054 const struct timeval *tv)
1055 {
1056 struct target_timeval *target_tv;
1057
1058 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1059 return -TARGET_EFAULT;
1060
1061 __put_user(tv->tv_sec, &target_tv->tv_sec);
1062 __put_user(tv->tv_usec, &target_tv->tv_usec);
1063
1064 unlock_user_struct(target_tv, target_tv_addr, 1);
1065
1066 return 0;
1067 }
1068
1069 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1070 #include <mqueue.h>
1071
1072 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1073 abi_ulong target_mq_attr_addr)
1074 {
1075 struct target_mq_attr *target_mq_attr;
1076
1077 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1078 target_mq_attr_addr, 1))
1079 return -TARGET_EFAULT;
1080
1081 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1082 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1083 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1084 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1085
1086 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1087
1088 return 0;
1089 }
1090
1091 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1092 const struct mq_attr *attr)
1093 {
1094 struct target_mq_attr *target_mq_attr;
1095
1096 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1097 target_mq_attr_addr, 0))
1098 return -TARGET_EFAULT;
1099
1100 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1101 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1102 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1103 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1104
1105 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1106
1107 return 0;
1108 }
1109 #endif
1110
1111 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1112 /* do_select() must return target values and target errnos. */
1113 static abi_long do_select(int n,
1114 abi_ulong rfd_addr, abi_ulong wfd_addr,
1115 abi_ulong efd_addr, abi_ulong target_tv_addr)
1116 {
1117 fd_set rfds, wfds, efds;
1118 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1119 struct timeval tv, *tv_ptr;
1120 abi_long ret;
1121
1122 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1123 if (ret) {
1124 return ret;
1125 }
1126 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1127 if (ret) {
1128 return ret;
1129 }
1130 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1131 if (ret) {
1132 return ret;
1133 }
1134
1135 if (target_tv_addr) {
1136 if (copy_from_user_timeval(&tv, target_tv_addr))
1137 return -TARGET_EFAULT;
1138 tv_ptr = &tv;
1139 } else {
1140 tv_ptr = NULL;
1141 }
1142
1143 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1144
1145 if (!is_error(ret)) {
1146 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1147 return -TARGET_EFAULT;
1148 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1149 return -TARGET_EFAULT;
1150 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1151 return -TARGET_EFAULT;
1152
1153 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1154 return -TARGET_EFAULT;
1155 }
1156
1157 return ret;
1158 }
1159 #endif
1160
1161 static abi_long do_pipe2(int host_pipe[], int flags)
1162 {
1163 #ifdef CONFIG_PIPE2
1164 return pipe2(host_pipe, flags);
1165 #else
1166 return -ENOSYS;
1167 #endif
1168 }
1169
1170 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1171 int flags, int is_pipe2)
1172 {
1173 int host_pipe[2];
1174 abi_long ret;
1175 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1176
1177 if (is_error(ret))
1178 return get_errno(ret);
1179
1180 /* Several targets have special calling conventions for the original
1181 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1182 if (!is_pipe2) {
1183 #if defined(TARGET_ALPHA)
1184 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1185 return host_pipe[0];
1186 #elif defined(TARGET_MIPS)
1187 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1188 return host_pipe[0];
1189 #elif defined(TARGET_SH4)
1190 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1191 return host_pipe[0];
1192 #endif
1193 }
1194
1195 if (put_user_s32(host_pipe[0], pipedes)
1196 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1197 return -TARGET_EFAULT;
1198 return get_errno(ret);
1199 }
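/* Background for the special cases above: on Alpha, MIPS and SH4 the native
 * pipe() syscall returns the two descriptors in a register pair instead of
 * writing through the user pointer, which is why the emulation stores
 * host_pipe[1] into the secondary return register and returns host_pipe[0]
 * directly (general ABI note, reflected by the per-target code above).
 */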
1200
1201 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1202 abi_ulong target_addr,
1203 socklen_t len)
1204 {
1205 struct target_ip_mreqn *target_smreqn;
1206
1207 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1208 if (!target_smreqn)
1209 return -TARGET_EFAULT;
1210 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1211 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1212 if (len == sizeof(struct target_ip_mreqn))
1213 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1214 unlock_user(target_smreqn, target_addr, 0);
1215
1216 return 0;
1217 }
1218
1219 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1220 abi_ulong target_addr,
1221 socklen_t len)
1222 {
1223 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1224 sa_family_t sa_family;
1225 struct target_sockaddr *target_saddr;
1226
1227 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1228 if (!target_saddr)
1229 return -TARGET_EFAULT;
1230
1231 sa_family = tswap16(target_saddr->sa_family);
1232
1233     /* Oops. The caller might send an incomplete sun_path; sun_path
1234 * must be terminated by \0 (see the manual page), but
1235 * unfortunately it is quite common to specify sockaddr_un
1236 * length as "strlen(x->sun_path)" while it should be
1237 * "strlen(...) + 1". We'll fix that here if needed.
1238      * The Linux kernel has a similar fix-up.
1239 */
1240
1241 if (sa_family == AF_UNIX) {
1242 if (len < unix_maxlen && len > 0) {
1243 char *cp = (char*)target_saddr;
1244
1245 if ( cp[len-1] && !cp[len] )
1246 len++;
1247 }
1248 if (len > unix_maxlen)
1249 len = unix_maxlen;
1250 }
1251
1252 memcpy(addr, target_saddr, len);
1253 addr->sa_family = sa_family;
1254 unlock_user(target_saddr, target_addr, 0);
1255
1256 return 0;
1257 }
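/* Example of the AF_UNIX fix-up above (made-up path, typical caller bug the
 * original comment describes): for "/tmp/sock" a guest may pass
 * len = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock").  If
 * the last byte inside len is non-NUL and the byte just past it is '\0' in
 * guest memory, len is bumped by one so the host kernel sees a terminated
 * sun_path, and it is finally clamped to sizeof(struct sockaddr_un).
 */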
1258
1259 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1260 struct sockaddr *addr,
1261 socklen_t len)
1262 {
1263 struct target_sockaddr *target_saddr;
1264
1265 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1266 if (!target_saddr)
1267 return -TARGET_EFAULT;
1268 memcpy(target_saddr, addr, len);
1269 target_saddr->sa_family = tswap16(addr->sa_family);
1270 unlock_user(target_saddr, target_addr, len);
1271
1272 return 0;
1273 }
1274
1275 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1276 struct target_msghdr *target_msgh)
1277 {
1278 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1279 abi_long msg_controllen;
1280 abi_ulong target_cmsg_addr;
1281 struct target_cmsghdr *target_cmsg;
1282 socklen_t space = 0;
1283
1284 msg_controllen = tswapal(target_msgh->msg_controllen);
1285 if (msg_controllen < sizeof (struct target_cmsghdr))
1286 goto the_end;
1287 target_cmsg_addr = tswapal(target_msgh->msg_control);
1288 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1289 if (!target_cmsg)
1290 return -TARGET_EFAULT;
1291
1292 while (cmsg && target_cmsg) {
1293 void *data = CMSG_DATA(cmsg);
1294 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1295
1296 int len = tswapal(target_cmsg->cmsg_len)
1297 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1298
1299 space += CMSG_SPACE(len);
1300 if (space > msgh->msg_controllen) {
1301 space -= CMSG_SPACE(len);
1302 gemu_log("Host cmsg overflow\n");
1303 break;
1304 }
1305
1306 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1307 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1308 cmsg->cmsg_len = CMSG_LEN(len);
1309
1310 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1311 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1312 memcpy(data, target_data, len);
1313 } else {
1314 int *fd = (int *)data;
1315 int *target_fd = (int *)target_data;
1316 int i, numfds = len / sizeof(int);
1317
1318 for (i = 0; i < numfds; i++)
1319 fd[i] = tswap32(target_fd[i]);
1320 }
1321
1322 cmsg = CMSG_NXTHDR(msgh, cmsg);
1323 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1324 }
1325 unlock_user(target_cmsg, target_cmsg_addr, 0);
1326 the_end:
1327 msgh->msg_controllen = space;
1328 return 0;
1329 }
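/* Why control messages are rebuilt rather than memcpy'd (both here and in
 * host_to_target_cmsg() below): struct cmsghdr uses the native word size
 * for cmsg_len and native alignment for its payload, so a 32-bit guest's
 * CMSG_LEN/CMSG_SPACE can differ from a 64-bit host's, and payloads such as
 * SCM_RIGHTS fd arrays also need per-element byte swapping.
 */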
1330
1331 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1332 struct msghdr *msgh)
1333 {
1334 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1335 abi_long msg_controllen;
1336 abi_ulong target_cmsg_addr;
1337 struct target_cmsghdr *target_cmsg;
1338 socklen_t space = 0;
1339
1340 msg_controllen = tswapal(target_msgh->msg_controllen);
1341 if (msg_controllen < sizeof (struct target_cmsghdr))
1342 goto the_end;
1343 target_cmsg_addr = tswapal(target_msgh->msg_control);
1344 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1345 if (!target_cmsg)
1346 return -TARGET_EFAULT;
1347
1348 while (cmsg && target_cmsg) {
1349 void *data = CMSG_DATA(cmsg);
1350 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1351
1352 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1353
1354 space += TARGET_CMSG_SPACE(len);
1355 if (space > msg_controllen) {
1356 space -= TARGET_CMSG_SPACE(len);
1357 gemu_log("Target cmsg overflow\n");
1358 break;
1359 }
1360
1361 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1362 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1363 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1364
1365 if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1366 (cmsg->cmsg_type == SCM_RIGHTS)) {
1367 int *fd = (int *)data;
1368 int *target_fd = (int *)target_data;
1369 int i, numfds = len / sizeof(int);
1370
1371 for (i = 0; i < numfds; i++)
1372 target_fd[i] = tswap32(fd[i]);
1373 } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1374 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1375 (len == sizeof(struct timeval))) {
1376 /* copy struct timeval to target */
1377 struct timeval *tv = (struct timeval *)data;
1378 struct target_timeval *target_tv =
1379 (struct target_timeval *)target_data;
1380
1381 target_tv->tv_sec = tswapal(tv->tv_sec);
1382 target_tv->tv_usec = tswapal(tv->tv_usec);
1383 } else {
1384 gemu_log("Unsupported ancillary data: %d/%d\n",
1385 cmsg->cmsg_level, cmsg->cmsg_type);
1386 memcpy(target_data, data, len);
1387 }
1388
1389 cmsg = CMSG_NXTHDR(msgh, cmsg);
1390 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1391 }
1392 unlock_user(target_cmsg, target_cmsg_addr, space);
1393 the_end:
1394 target_msgh->msg_controllen = tswapal(space);
1395 return 0;
1396 }
1397
1398 /* do_setsockopt() Must return target values and target errnos. */
1399 static abi_long do_setsockopt(int sockfd, int level, int optname,
1400 abi_ulong optval_addr, socklen_t optlen)
1401 {
1402 abi_long ret;
1403 int val;
1404 struct ip_mreqn *ip_mreq;
1405 struct ip_mreq_source *ip_mreq_source;
1406
1407 switch(level) {
1408 case SOL_TCP:
1409 /* TCP options all take an 'int' value. */
1410 if (optlen < sizeof(uint32_t))
1411 return -TARGET_EINVAL;
1412
1413 if (get_user_u32(val, optval_addr))
1414 return -TARGET_EFAULT;
1415 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1416 break;
1417 case SOL_IP:
1418 switch(optname) {
1419 case IP_TOS:
1420 case IP_TTL:
1421 case IP_HDRINCL:
1422 case IP_ROUTER_ALERT:
1423 case IP_RECVOPTS:
1424 case IP_RETOPTS:
1425 case IP_PKTINFO:
1426 case IP_MTU_DISCOVER:
1427 case IP_RECVERR:
1428 case IP_RECVTOS:
1429 #ifdef IP_FREEBIND
1430 case IP_FREEBIND:
1431 #endif
1432 case IP_MULTICAST_TTL:
1433 case IP_MULTICAST_LOOP:
1434 val = 0;
1435 if (optlen >= sizeof(uint32_t)) {
1436 if (get_user_u32(val, optval_addr))
1437 return -TARGET_EFAULT;
1438 } else if (optlen >= 1) {
1439 if (get_user_u8(val, optval_addr))
1440 return -TARGET_EFAULT;
1441 }
1442 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1443 break;
1444 case IP_ADD_MEMBERSHIP:
1445 case IP_DROP_MEMBERSHIP:
1446 if (optlen < sizeof (struct target_ip_mreq) ||
1447 optlen > sizeof (struct target_ip_mreqn))
1448 return -TARGET_EINVAL;
1449
1450 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1451 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1452 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1453 break;
1454
1455 case IP_BLOCK_SOURCE:
1456 case IP_UNBLOCK_SOURCE:
1457 case IP_ADD_SOURCE_MEMBERSHIP:
1458 case IP_DROP_SOURCE_MEMBERSHIP:
1459 if (optlen != sizeof (struct target_ip_mreq_source))
1460 return -TARGET_EINVAL;
1461
1462 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1463 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1464 unlock_user (ip_mreq_source, optval_addr, 0);
1465 break;
1466
1467 default:
1468 goto unimplemented;
1469 }
1470 break;
1471 case SOL_RAW:
1472 switch (optname) {
1473 case ICMP_FILTER:
1474         /* struct icmp_filter takes a u32 value */
1475 if (optlen < sizeof(uint32_t)) {
1476 return -TARGET_EINVAL;
1477 }
1478
1479 if (get_user_u32(val, optval_addr)) {
1480 return -TARGET_EFAULT;
1481 }
1482 ret = get_errno(setsockopt(sockfd, level, optname,
1483 &val, sizeof(val)));
1484 break;
1485
1486 default:
1487 goto unimplemented;
1488 }
1489 break;
1490 case TARGET_SOL_SOCKET:
1491 switch (optname) {
1492 case TARGET_SO_RCVTIMEO:
1493 {
1494 struct timeval tv;
1495
1496 optname = SO_RCVTIMEO;
1497
1498 set_timeout:
1499 if (optlen != sizeof(struct target_timeval)) {
1500 return -TARGET_EINVAL;
1501 }
1502
1503 if (copy_from_user_timeval(&tv, optval_addr)) {
1504 return -TARGET_EFAULT;
1505 }
1506
1507 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1508 &tv, sizeof(tv)));
1509 return ret;
1510 }
1511 case TARGET_SO_SNDTIMEO:
1512 optname = SO_SNDTIMEO;
1513 goto set_timeout;
1514 /* Options with 'int' argument. */
1515 case TARGET_SO_DEBUG:
1516 optname = SO_DEBUG;
1517 break;
1518 case TARGET_SO_REUSEADDR:
1519 optname = SO_REUSEADDR;
1520 break;
1521 case TARGET_SO_TYPE:
1522 optname = SO_TYPE;
1523 break;
1524 case TARGET_SO_ERROR:
1525 optname = SO_ERROR;
1526 break;
1527 case TARGET_SO_DONTROUTE:
1528 optname = SO_DONTROUTE;
1529 break;
1530 case TARGET_SO_BROADCAST:
1531 optname = SO_BROADCAST;
1532 break;
1533 case TARGET_SO_SNDBUF:
1534 optname = SO_SNDBUF;
1535 break;
1536 case TARGET_SO_RCVBUF:
1537 optname = SO_RCVBUF;
1538 break;
1539 case TARGET_SO_KEEPALIVE:
1540 optname = SO_KEEPALIVE;
1541 break;
1542 case TARGET_SO_OOBINLINE:
1543 optname = SO_OOBINLINE;
1544 break;
1545 case TARGET_SO_NO_CHECK:
1546 optname = SO_NO_CHECK;
1547 break;
1548 case TARGET_SO_PRIORITY:
1549 optname = SO_PRIORITY;
1550 break;
1551 #ifdef SO_BSDCOMPAT
1552 case TARGET_SO_BSDCOMPAT:
1553 optname = SO_BSDCOMPAT;
1554 break;
1555 #endif
1556 case TARGET_SO_PASSCRED:
1557 optname = SO_PASSCRED;
1558 break;
1559 case TARGET_SO_TIMESTAMP:
1560 optname = SO_TIMESTAMP;
1561 break;
1562 case TARGET_SO_RCVLOWAT:
1563 optname = SO_RCVLOWAT;
1564 break;
1565
1566 default:
1567 goto unimplemented;
1568 }
1569 if (optlen < sizeof(uint32_t))
1570 return -TARGET_EINVAL;
1571
1572 if (get_user_u32(val, optval_addr))
1573 return -TARGET_EFAULT;
1574 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1575 break;
1576 default:
1577 unimplemented:
1578 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1579 ret = -TARGET_ENOPROTOOPT;
1580 }
1581 return ret;
1582 }
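/* Note on the TARGET_SO_* remapping above: the SOL_SOCKET option numbers
 * (and SOL_SOCKET itself) differ between architectures, e.g. Alpha, MIPS
 * and SPARC diverge from the generic values, so the guest's constants must
 * be rewritten to the host's before calling setsockopt().  The SOL_IP and
 * SOL_TCP option numbers are shared across architectures and are passed
 * through once the option value has been fetched from guest memory.
 */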
1583
1584 /* do_getsockopt() Must return target values and target errnos. */
1585 static abi_long do_getsockopt(int sockfd, int level, int optname,
1586 abi_ulong optval_addr, abi_ulong optlen)
1587 {
1588 abi_long ret;
1589 int len, val;
1590 socklen_t lv;
1591
1592 switch(level) {
1593 case TARGET_SOL_SOCKET:
1594 level = SOL_SOCKET;
1595 switch (optname) {
1596 /* These don't just return a single integer */
1597 case TARGET_SO_LINGER:
1598 case TARGET_SO_RCVTIMEO:
1599 case TARGET_SO_SNDTIMEO:
1600 case TARGET_SO_PEERNAME:
1601 goto unimplemented;
1602 case TARGET_SO_PEERCRED: {
1603 struct ucred cr;
1604 socklen_t crlen;
1605 struct target_ucred *tcr;
1606
1607 if (get_user_u32(len, optlen)) {
1608 return -TARGET_EFAULT;
1609 }
1610 if (len < 0) {
1611 return -TARGET_EINVAL;
1612 }
1613
1614 crlen = sizeof(cr);
1615 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1616 &cr, &crlen));
1617 if (ret < 0) {
1618 return ret;
1619 }
1620 if (len > crlen) {
1621 len = crlen;
1622 }
1623 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1624 return -TARGET_EFAULT;
1625 }
1626 __put_user(cr.pid, &tcr->pid);
1627 __put_user(cr.uid, &tcr->uid);
1628 __put_user(cr.gid, &tcr->gid);
1629 unlock_user_struct(tcr, optval_addr, 1);
1630 if (put_user_u32(len, optlen)) {
1631 return -TARGET_EFAULT;
1632 }
1633 break;
1634 }
1635 /* Options with 'int' argument. */
1636 case TARGET_SO_DEBUG:
1637 optname = SO_DEBUG;
1638 goto int_case;
1639 case TARGET_SO_REUSEADDR:
1640 optname = SO_REUSEADDR;
1641 goto int_case;
1642 case TARGET_SO_TYPE:
1643 optname = SO_TYPE;
1644 goto int_case;
1645 case TARGET_SO_ERROR:
1646 optname = SO_ERROR;
1647 goto int_case;
1648 case TARGET_SO_DONTROUTE:
1649 optname = SO_DONTROUTE;
1650 goto int_case;
1651 case TARGET_SO_BROADCAST:
1652 optname = SO_BROADCAST;
1653 goto int_case;
1654 case TARGET_SO_SNDBUF:
1655 optname = SO_SNDBUF;
1656 goto int_case;
1657 case TARGET_SO_RCVBUF:
1658 optname = SO_RCVBUF;
1659 goto int_case;
1660 case TARGET_SO_KEEPALIVE:
1661 optname = SO_KEEPALIVE;
1662 goto int_case;
1663 case TARGET_SO_OOBINLINE:
1664 optname = SO_OOBINLINE;
1665 goto int_case;
1666 case TARGET_SO_NO_CHECK:
1667 optname = SO_NO_CHECK;
1668 goto int_case;
1669 case TARGET_SO_PRIORITY:
1670 optname = SO_PRIORITY;
1671 goto int_case;
1672 #ifdef SO_BSDCOMPAT
1673 case TARGET_SO_BSDCOMPAT:
1674 optname = SO_BSDCOMPAT;
1675 goto int_case;
1676 #endif
1677 case TARGET_SO_PASSCRED:
1678 optname = SO_PASSCRED;
1679 goto int_case;
1680 case TARGET_SO_TIMESTAMP:
1681 optname = SO_TIMESTAMP;
1682 goto int_case;
1683 case TARGET_SO_RCVLOWAT:
1684 optname = SO_RCVLOWAT;
1685 goto int_case;
1686 default:
1687 goto int_case;
1688 }
1689 break;
1690 case SOL_TCP:
1691 /* TCP options all take an 'int' value. */
1692 int_case:
1693 if (get_user_u32(len, optlen))
1694 return -TARGET_EFAULT;
1695 if (len < 0)
1696 return -TARGET_EINVAL;
1697 lv = sizeof(lv);
1698 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1699 if (ret < 0)
1700 return ret;
1701 if (len > lv)
1702 len = lv;
1703 if (len == 4) {
1704 if (put_user_u32(val, optval_addr))
1705 return -TARGET_EFAULT;
1706 } else {
1707 if (put_user_u8(val, optval_addr))
1708 return -TARGET_EFAULT;
1709 }
1710 if (put_user_u32(len, optlen))
1711 return -TARGET_EFAULT;
1712 break;
1713 case SOL_IP:
1714 switch(optname) {
1715 case IP_TOS:
1716 case IP_TTL:
1717 case IP_HDRINCL:
1718 case IP_ROUTER_ALERT:
1719 case IP_RECVOPTS:
1720 case IP_RETOPTS:
1721 case IP_PKTINFO:
1722 case IP_MTU_DISCOVER:
1723 case IP_RECVERR:
1724 case IP_RECVTOS:
1725 #ifdef IP_FREEBIND
1726 case IP_FREEBIND:
1727 #endif
1728 case IP_MULTICAST_TTL:
1729 case IP_MULTICAST_LOOP:
1730 if (get_user_u32(len, optlen))
1731 return -TARGET_EFAULT;
1732 if (len < 0)
1733 return -TARGET_EINVAL;
1734 lv = sizeof(lv);
1735 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1736 if (ret < 0)
1737 return ret;
1738 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1739 len = 1;
1740 if (put_user_u32(len, optlen)
1741 || put_user_u8(val, optval_addr))
1742 return -TARGET_EFAULT;
1743 } else {
1744 if (len > sizeof(int))
1745 len = sizeof(int);
1746 if (put_user_u32(len, optlen)
1747 || put_user_u32(val, optval_addr))
1748 return -TARGET_EFAULT;
1749 }
1750 break;
1751 default:
1752 ret = -TARGET_ENOPROTOOPT;
1753 break;
1754 }
1755 break;
1756 default:
1757 unimplemented:
1758 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1759 level, optname);
1760 ret = -TARGET_EOPNOTSUPP;
1761 break;
1762 }
1763 return ret;
1764 }
1765
1766 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1767 int count, int copy)
1768 {
1769 struct target_iovec *target_vec;
1770 struct iovec *vec;
1771 abi_ulong total_len, max_len;
1772 int i;
1773
1774 if (count == 0) {
1775 errno = 0;
1776 return NULL;
1777 }
1778 if (count > IOV_MAX) {
1779 errno = EINVAL;
1780 return NULL;
1781 }
1782
1783 vec = calloc(count, sizeof(struct iovec));
1784 if (vec == NULL) {
1785 errno = ENOMEM;
1786 return NULL;
1787 }
1788
1789 target_vec = lock_user(VERIFY_READ, target_addr,
1790 count * sizeof(struct target_iovec), 1);
1791 if (target_vec == NULL) {
1792 errno = EFAULT;
1793 goto fail2;
1794 }
1795
1796 /* ??? If host page size > target page size, this will result in a
1797 value larger than what we can actually support. */
1798 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1799 total_len = 0;
1800
1801 for (i = 0; i < count; i++) {
1802 abi_ulong base = tswapal(target_vec[i].iov_base);
1803 abi_long len = tswapal(target_vec[i].iov_len);
1804
1805 if (len < 0) {
1806 errno = EINVAL;
1807 goto fail;
1808 } else if (len == 0) {
1809 /* Zero length pointer is ignored. */
1810 vec[i].iov_base = 0;
1811 } else {
1812 vec[i].iov_base = lock_user(type, base, len, copy);
1813 if (!vec[i].iov_base) {
1814 errno = EFAULT;
1815 goto fail;
1816 }
1817 if (len > max_len - total_len) {
1818 len = max_len - total_len;
1819 }
1820 }
1821 vec[i].iov_len = len;
1822 total_len += len;
1823 }
1824
1825 unlock_user(target_vec, target_addr, 0);
1826 return vec;
1827
1828 fail:
1829 free(vec);
1830 fail2:
1831 unlock_user(target_vec, target_addr, 0);
1832 return NULL;
1833 }
1834
1835 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1836 int count, int copy)
1837 {
1838 struct target_iovec *target_vec;
1839 int i;
1840
1841 target_vec = lock_user(VERIFY_READ, target_addr,
1842 count * sizeof(struct target_iovec), 1);
1843 if (target_vec) {
1844 for (i = 0; i < count; i++) {
1845 abi_ulong base = tswapal(target_vec[i].iov_base);
1846             abi_long len = tswapal(target_vec[i].iov_len);
1847 if (len < 0) {
1848 break;
1849 }
1850 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1851 }
1852 unlock_user(target_vec, target_addr, 0);
1853 }
1854
1855 free(vec);
1856 }
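/* Usage pattern for the pair above (see do_sendrecvmsg() below for a real
 * caller): lock with VERIFY_READ when the host call will read the guest
 * buffers (send/write) and with VERIFY_WRITE when it will fill them
 * (recv/read), then unlock with copy=1 only in the latter case so the data
 * written by the host is copied back into guest memory.
 */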
1857
1858 /* do_socket() Must return target values and target errnos. */
1859 static abi_long do_socket(int domain, int type, int protocol)
1860 {
1861 #if defined(TARGET_MIPS)
1862 switch(type) {
1863 case TARGET_SOCK_DGRAM:
1864 type = SOCK_DGRAM;
1865 break;
1866 case TARGET_SOCK_STREAM:
1867 type = SOCK_STREAM;
1868 break;
1869 case TARGET_SOCK_RAW:
1870 type = SOCK_RAW;
1871 break;
1872 case TARGET_SOCK_RDM:
1873 type = SOCK_RDM;
1874 break;
1875 case TARGET_SOCK_SEQPACKET:
1876 type = SOCK_SEQPACKET;
1877 break;
1878 case TARGET_SOCK_PACKET:
1879 type = SOCK_PACKET;
1880 break;
1881 }
1882 #endif
1883 if (domain == PF_NETLINK)
1884         return -TARGET_EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1885 return get_errno(socket(domain, type, protocol));
1886 }
1887
1888 /* do_bind() Must return target values and target errnos. */
1889 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1890 socklen_t addrlen)
1891 {
1892 void *addr;
1893 abi_long ret;
1894
1895 if ((int)addrlen < 0) {
1896 return -TARGET_EINVAL;
1897 }
1898
1899 addr = alloca(addrlen+1);
1900
1901 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1902 if (ret)
1903 return ret;
1904
1905 return get_errno(bind(sockfd, addr, addrlen));
1906 }
1907
1908 /* do_connect() Must return target values and target errnos. */
1909 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1910 socklen_t addrlen)
1911 {
1912 void *addr;
1913 abi_long ret;
1914
1915 if ((int)addrlen < 0) {
1916 return -TARGET_EINVAL;
1917 }
1918
1919 addr = alloca(addrlen);
1920
1921 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1922 if (ret)
1923 return ret;
1924
1925 return get_errno(connect(sockfd, addr, addrlen));
1926 }
1927
1928 /* do_sendrecvmsg() Must return target values and target errnos. */
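/* Common helper for sendmsg/recvmsg: converts the guest msghdr (name,
 * control data and iovec) to the host layout, performs the call, and for
 * recvmsg converts the results back to the guest. */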
1929 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1930 int flags, int send)
1931 {
1932 abi_long ret, len;
1933 struct target_msghdr *msgp;
1934 struct msghdr msg;
1935 int count;
1936 struct iovec *vec;
1937 abi_ulong target_vec;
1938
1939 /* FIXME */
1940 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1941 msgp,
1942 target_msg,
1943 send ? 1 : 0))
1944 return -TARGET_EFAULT;
1945 if (msgp->msg_name) {
1946 msg.msg_namelen = tswap32(msgp->msg_namelen);
1947 msg.msg_name = alloca(msg.msg_namelen);
1948 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1949 msg.msg_namelen);
1950 if (ret) {
1951 goto out2;
1952 }
1953 } else {
1954 msg.msg_name = NULL;
1955 msg.msg_namelen = 0;
1956 }
1957 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1958 msg.msg_control = alloca(msg.msg_controllen);
1959 msg.msg_flags = tswap32(msgp->msg_flags);
1960
1961 count = tswapal(msgp->msg_iovlen);
1962 target_vec = tswapal(msgp->msg_iov);
1963 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1964 target_vec, count, send);
1965 if (vec == NULL) {
1966 ret = -host_to_target_errno(errno);
1967 goto out2;
1968 }
1969 msg.msg_iovlen = count;
1970 msg.msg_iov = vec;
1971
1972 if (send) {
1973 ret = target_to_host_cmsg(&msg, msgp);
1974 if (ret == 0)
1975 ret = get_errno(sendmsg(fd, &msg, flags));
1976 } else {
1977 ret = get_errno(recvmsg(fd, &msg, flags));
1978 if (!is_error(ret)) {
1979 len = ret;
1980 ret = host_to_target_cmsg(msgp, &msg);
1981 if (!is_error(ret)) {
1982 msgp->msg_namelen = tswap32(msg.msg_namelen);
1983 if (msg.msg_name != NULL) {
1984 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1985 msg.msg_name, msg.msg_namelen);
1986 if (ret) {
1987 goto out;
1988 }
1989 }
1990
1991 ret = len;
1992 }
1993 }
1994 }
1995
1996 out:
1997 unlock_iovec(vec, target_vec, count, !send);
1998 out2:
1999 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2000 return ret;
2001 }
2002
2003 /* do_accept() Must return target values and target errnos. */
2004 static abi_long do_accept(int fd, abi_ulong target_addr,
2005 abi_ulong target_addrlen_addr)
2006 {
2007 socklen_t addrlen;
2008 void *addr;
2009 abi_long ret;
2010
2011 if (target_addr == 0)
2012 return get_errno(accept(fd, NULL, NULL));
2013
2014 /* linux returns EINVAL if addrlen pointer is invalid */
2015 if (get_user_u32(addrlen, target_addrlen_addr))
2016 return -TARGET_EINVAL;
2017
2018 if ((int)addrlen < 0) {
2019 return -TARGET_EINVAL;
2020 }
2021
2022 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2023 return -TARGET_EINVAL;
2024
2025 addr = alloca(addrlen);
2026
2027 ret = get_errno(accept(fd, addr, &addrlen));
2028 if (!is_error(ret)) {
2029 host_to_target_sockaddr(target_addr, addr, addrlen);
2030 if (put_user_u32(addrlen, target_addrlen_addr))
2031 ret = -TARGET_EFAULT;
2032 }
2033 return ret;
2034 }
2035
2036 /* do_getpeername() Must return target values and target errnos. */
2037 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2038 abi_ulong target_addrlen_addr)
2039 {
2040 socklen_t addrlen;
2041 void *addr;
2042 abi_long ret;
2043
2044 if (get_user_u32(addrlen, target_addrlen_addr))
2045 return -TARGET_EFAULT;
2046
2047 if ((int)addrlen < 0) {
2048 return -TARGET_EINVAL;
2049 }
2050
2051 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2052 return -TARGET_EFAULT;
2053
2054 addr = alloca(addrlen);
2055
2056 ret = get_errno(getpeername(fd, addr, &addrlen));
2057 if (!is_error(ret)) {
2058 host_to_target_sockaddr(target_addr, addr, addrlen);
2059 if (put_user_u32(addrlen, target_addrlen_addr))
2060 ret = -TARGET_EFAULT;
2061 }
2062 return ret;
2063 }
2064
2065 /* do_getsockname() Must return target values and target errnos. */
2066 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2067 abi_ulong target_addrlen_addr)
2068 {
2069 socklen_t addrlen;
2070 void *addr;
2071 abi_long ret;
2072
2073 if (get_user_u32(addrlen, target_addrlen_addr))
2074 return -TARGET_EFAULT;
2075
2076 if ((int)addrlen < 0) {
2077 return -TARGET_EINVAL;
2078 }
2079
2080 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2081 return -TARGET_EFAULT;
2082
2083 addr = alloca(addrlen);
2084
2085 ret = get_errno(getsockname(fd, addr, &addrlen));
2086 if (!is_error(ret)) {
2087 host_to_target_sockaddr(target_addr, addr, addrlen);
2088 if (put_user_u32(addrlen, target_addrlen_addr))
2089 ret = -TARGET_EFAULT;
2090 }
2091 return ret;
2092 }
2093
2094 /* do_socketpair() Must return target values and target errnos. */
2095 static abi_long do_socketpair(int domain, int type, int protocol,
2096 abi_ulong target_tab_addr)
2097 {
2098 int tab[2];
2099 abi_long ret;
2100
2101 ret = get_errno(socketpair(domain, type, protocol, tab));
2102 if (!is_error(ret)) {
2103 if (put_user_s32(tab[0], target_tab_addr)
2104 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2105 ret = -TARGET_EFAULT;
2106 }
2107 return ret;
2108 }
2109
2110 /* do_sendto() Must return target values and target errnos. */
2111 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2112 abi_ulong target_addr, socklen_t addrlen)
2113 {
2114 void *addr;
2115 void *host_msg;
2116 abi_long ret;
2117
2118 if ((int)addrlen < 0) {
2119 return -TARGET_EINVAL;
2120 }
2121
2122 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2123 if (!host_msg)
2124 return -TARGET_EFAULT;
2125 if (target_addr) {
2126 addr = alloca(addrlen);
2127 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2128 if (ret) {
2129 unlock_user(host_msg, msg, 0);
2130 return ret;
2131 }
2132 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2133 } else {
2134 ret = get_errno(send(fd, host_msg, len, flags));
2135 }
2136 unlock_user(host_msg, msg, 0);
2137 return ret;
2138 }
2139
2140 /* do_recvfrom() Must return target values and target errnos. */
2141 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2142 abi_ulong target_addr,
2143 abi_ulong target_addrlen)
2144 {
2145 socklen_t addrlen;
2146 void *addr;
2147 void *host_msg;
2148 abi_long ret;
2149
2150 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2151 if (!host_msg)
2152 return -TARGET_EFAULT;
2153 if (target_addr) {
2154 if (get_user_u32(addrlen, target_addrlen)) {
2155 ret = -TARGET_EFAULT;
2156 goto fail;
2157 }
2158 if ((int)addrlen < 0) {
2159 ret = -TARGET_EINVAL;
2160 goto fail;
2161 }
2162 addr = alloca(addrlen);
2163 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2164 } else {
2165 addr = NULL; /* To keep compiler quiet. */
2166 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2167 }
2168 if (!is_error(ret)) {
2169 if (target_addr) {
2170 host_to_target_sockaddr(target_addr, addr, addrlen);
2171 if (put_user_u32(addrlen, target_addrlen)) {
2172 ret = -TARGET_EFAULT;
2173 goto fail;
2174 }
2175 }
2176 unlock_user(host_msg, msg, len);
2177 } else {
2178 fail:
2179 unlock_user(host_msg, msg, 0);
2180 }
2181 return ret;
2182 }
2183
2184 #ifdef TARGET_NR_socketcall
2185 /* do_socketcall() Must return target values and target errnos. */
2186 static abi_long do_socketcall(int num, abi_ulong vptr)
2187 {
2188 abi_long ret;
2189 const int n = sizeof(abi_ulong);
2190
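/* The socketcall arguments are packed in guest memory as an array of
 * abi_ulong values starting at vptr; each case below fetches its
 * arguments from consecutive n-byte slots. */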
2191 switch(num) {
2192 case SOCKOP_socket:
2193 {
2194 abi_ulong domain, type, protocol;
2195
2196 if (get_user_ual(domain, vptr)
2197 || get_user_ual(type, vptr + n)
2198 || get_user_ual(protocol, vptr + 2 * n))
2199 return -TARGET_EFAULT;
2200
2201 ret = do_socket(domain, type, protocol);
2202 }
2203 break;
2204 case SOCKOP_bind:
2205 {
2206 abi_ulong sockfd;
2207 abi_ulong target_addr;
2208 socklen_t addrlen;
2209
2210 if (get_user_ual(sockfd, vptr)
2211 || get_user_ual(target_addr, vptr + n)
2212 || get_user_ual(addrlen, vptr + 2 * n))
2213 return -TARGET_EFAULT;
2214
2215 ret = do_bind(sockfd, target_addr, addrlen);
2216 }
2217 break;
2218 case SOCKOP_connect:
2219 {
2220 abi_ulong sockfd;
2221 abi_ulong target_addr;
2222 socklen_t addrlen;
2223
2224 if (get_user_ual(sockfd, vptr)
2225 || get_user_ual(target_addr, vptr + n)
2226 || get_user_ual(addrlen, vptr + 2 * n))
2227 return -TARGET_EFAULT;
2228
2229 ret = do_connect(sockfd, target_addr, addrlen);
2230 }
2231 break;
2232 case SOCKOP_listen:
2233 {
2234 abi_ulong sockfd, backlog;
2235
2236 if (get_user_ual(sockfd, vptr)
2237 || get_user_ual(backlog, vptr + n))
2238 return -TARGET_EFAULT;
2239
2240 ret = get_errno(listen(sockfd, backlog));
2241 }
2242 break;
2243 case SOCKOP_accept:
2244 {
2245 abi_ulong sockfd;
2246 abi_ulong target_addr, target_addrlen;
2247
2248 if (get_user_ual(sockfd, vptr)
2249 || get_user_ual(target_addr, vptr + n)
2250 || get_user_ual(target_addrlen, vptr + 2 * n))
2251 return -TARGET_EFAULT;
2252
2253 ret = do_accept(sockfd, target_addr, target_addrlen);
2254 }
2255 break;
2256 case SOCKOP_getsockname:
2257 {
2258 abi_ulong sockfd;
2259 abi_ulong target_addr, target_addrlen;
2260
2261 if (get_user_ual(sockfd, vptr)
2262 || get_user_ual(target_addr, vptr + n)
2263 || get_user_ual(target_addrlen, vptr + 2 * n))
2264 return -TARGET_EFAULT;
2265
2266 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2267 }
2268 break;
2269 case SOCKOP_getpeername:
2270 {
2271 abi_ulong sockfd;
2272 abi_ulong target_addr, target_addrlen;
2273
2274 if (get_user_ual(sockfd, vptr)
2275 || get_user_ual(target_addr, vptr + n)
2276 || get_user_ual(target_addrlen, vptr + 2 * n))
2277 return -TARGET_EFAULT;
2278
2279 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2280 }
2281 break;
2282 case SOCKOP_socketpair:
2283 {
2284 abi_ulong domain, type, protocol;
2285 abi_ulong tab;
2286
2287 if (get_user_ual(domain, vptr)
2288 || get_user_ual(type, vptr + n)
2289 || get_user_ual(protocol, vptr + 2 * n)
2290 || get_user_ual(tab, vptr + 3 * n))
2291 return -TARGET_EFAULT;
2292
2293 ret = do_socketpair(domain, type, protocol, tab);
2294 }
2295 break;
2296 case SOCKOP_send:
2297 {
2298 abi_ulong sockfd;
2299 abi_ulong msg;
2300 size_t len;
2301 abi_ulong flags;
2302
2303 if (get_user_ual(sockfd, vptr)
2304 || get_user_ual(msg, vptr + n)
2305 || get_user_ual(len, vptr + 2 * n)
2306 || get_user_ual(flags, vptr + 3 * n))
2307 return -TARGET_EFAULT;
2308
2309 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2310 }
2311 break;
2312 case SOCKOP_recv:
2313 {
2314 abi_ulong sockfd;
2315 abi_ulong msg;
2316 size_t len;
2317 abi_ulong flags;
2318
2319 if (get_user_ual(sockfd, vptr)
2320 || get_user_ual(msg, vptr + n)
2321 || get_user_ual(len, vptr + 2 * n)
2322 || get_user_ual(flags, vptr + 3 * n))
2323 return -TARGET_EFAULT;
2324
2325 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2326 }
2327 break;
2328 case SOCKOP_sendto:
2329 {
2330 abi_ulong sockfd;
2331 abi_ulong msg;
2332 size_t len;
2333 abi_ulong flags;
2334 abi_ulong addr;
2335 socklen_t addrlen;
2336
2337 if (get_user_ual(sockfd, vptr)
2338 || get_user_ual(msg, vptr + n)
2339 || get_user_ual(len, vptr + 2 * n)
2340 || get_user_ual(flags, vptr + 3 * n)
2341 || get_user_ual(addr, vptr + 4 * n)
2342 || get_user_ual(addrlen, vptr + 5 * n))
2343 return -TARGET_EFAULT;
2344
2345 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2346 }
2347 break;
2348 case SOCKOP_recvfrom:
2349 {
2350 abi_ulong sockfd;
2351 abi_ulong msg;
2352 size_t len;
2353 abi_ulong flags;
2354 abi_ulong addr;
2355 socklen_t addrlen;
2356
2357 if (get_user_ual(sockfd, vptr)
2358 || get_user_ual(msg, vptr + n)
2359 || get_user_ual(len, vptr + 2 * n)
2360 || get_user_ual(flags, vptr + 3 * n)
2361 || get_user_ual(addr, vptr + 4 * n)
2362 || get_user_ual(addrlen, vptr + 5 * n))
2363 return -TARGET_EFAULT;
2364
2365 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2366 }
2367 break;
2368 case SOCKOP_shutdown:
2369 {
2370 abi_ulong sockfd, how;
2371
2372 if (get_user_ual(sockfd, vptr)
2373 || get_user_ual(how, vptr + n))
2374 return -TARGET_EFAULT;
2375
2376 ret = get_errno(shutdown(sockfd, how));
2377 }
2378 break;
2379 case SOCKOP_sendmsg:
2380 case SOCKOP_recvmsg:
2381 {
2382 abi_ulong fd;
2383 abi_ulong target_msg;
2384 abi_ulong flags;
2385
2386 if (get_user_ual(fd, vptr)
2387 || get_user_ual(target_msg, vptr + n)
2388 || get_user_ual(flags, vptr + 2 * n))
2389 return -TARGET_EFAULT;
2390
2391 ret = do_sendrecvmsg(fd, target_msg, flags,
2392 (num == SOCKOP_sendmsg));
2393 }
2394 break;
2395 case SOCKOP_setsockopt:
2396 {
2397 abi_ulong sockfd;
2398 abi_ulong level;
2399 abi_ulong optname;
2400 abi_ulong optval;
2401 socklen_t optlen;
2402
2403 if (get_user_ual(sockfd, vptr)
2404 || get_user_ual(level, vptr + n)
2405 || get_user_ual(optname, vptr + 2 * n)
2406 || get_user_ual(optval, vptr + 3 * n)
2407 || get_user_ual(optlen, vptr + 4 * n))
2408 return -TARGET_EFAULT;
2409
2410 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2411 }
2412 break;
2413 case SOCKOP_getsockopt:
2414 {
2415 abi_ulong sockfd;
2416 abi_ulong level;
2417 abi_ulong optname;
2418 abi_ulong optval;
2419 socklen_t optlen;
2420
2421 if (get_user_ual(sockfd, vptr)
2422 || get_user_ual(level, vptr + n)
2423 || get_user_ual(optname, vptr + 2 * n)
2424 || get_user_ual(optval, vptr + 3 * n)
2425 || get_user_ual(optlen, vptr + 4 * n))
2426 return -TARGET_EFAULT;
2427
2428 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2429 }
2430 break;
2431 default:
2432 gemu_log("Unsupported socketcall: %d\n", num);
2433 ret = -TARGET_ENOSYS;
2434 break;
2435 }
2436 return ret;
2437 }
2438 #endif
2439
2440 #define N_SHM_REGIONS 32
2441
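/* Book-keeping for shmat()ed segments: do_shmat() records each attached
 * region here so do_shmdt() can later clear the guest page flags over the
 * correct length. */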
2442 static struct shm_region {
2443 abi_ulong start;
2444 abi_ulong size;
2445 } shm_regions[N_SHM_REGIONS];
2446
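/* Guest ABI layouts of the SysV IPC structures, plus the helpers that
 * convert them to and from the host structures. */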
2447 struct target_ipc_perm
2448 {
2449 abi_long __key;
2450 abi_ulong uid;
2451 abi_ulong gid;
2452 abi_ulong cuid;
2453 abi_ulong cgid;
2454 unsigned short int mode;
2455 unsigned short int __pad1;
2456 unsigned short int __seq;
2457 unsigned short int __pad2;
2458 abi_ulong __unused1;
2459 abi_ulong __unused2;
2460 };
2461
2462 struct target_semid_ds
2463 {
2464 struct target_ipc_perm sem_perm;
2465 abi_ulong sem_otime;
2466 abi_ulong __unused1;
2467 abi_ulong sem_ctime;
2468 abi_ulong __unused2;
2469 abi_ulong sem_nsems;
2470 abi_ulong __unused3;
2471 abi_ulong __unused4;
2472 };
2473
2474 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2475 abi_ulong target_addr)
2476 {
2477 struct target_ipc_perm *target_ip;
2478 struct target_semid_ds *target_sd;
2479
2480 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2481 return -TARGET_EFAULT;
2482 target_ip = &(target_sd->sem_perm);
2483 host_ip->__key = tswapal(target_ip->__key);
2484 host_ip->uid = tswapal(target_ip->uid);
2485 host_ip->gid = tswapal(target_ip->gid);
2486 host_ip->cuid = tswapal(target_ip->cuid);
2487 host_ip->cgid = tswapal(target_ip->cgid);
2488 host_ip->mode = tswap16(target_ip->mode);
2489 unlock_user_struct(target_sd, target_addr, 0);
2490 return 0;
2491 }
2492
2493 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2494 struct ipc_perm *host_ip)
2495 {
2496 struct target_ipc_perm *target_ip;
2497 struct target_semid_ds *target_sd;
2498
2499 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2500 return -TARGET_EFAULT;
2501 target_ip = &(target_sd->sem_perm);
2502 target_ip->__key = tswapal(host_ip->__key);
2503 target_ip->uid = tswapal(host_ip->uid);
2504 target_ip->gid = tswapal(host_ip->gid);
2505 target_ip->cuid = tswapal(host_ip->cuid);
2506 target_ip->cgid = tswapal(host_ip->cgid);
2507 target_ip->mode = tswap16(host_ip->mode);
2508 unlock_user_struct(target_sd, target_addr, 1);
2509 return 0;
2510 }
2511
2512 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2513 abi_ulong target_addr)
2514 {
2515 struct target_semid_ds *target_sd;
2516
2517 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2518 return -TARGET_EFAULT;
2519 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2520 return -TARGET_EFAULT;
2521 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2522 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2523 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2524 unlock_user_struct(target_sd, target_addr, 0);
2525 return 0;
2526 }
2527
2528 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2529 struct semid_ds *host_sd)
2530 {
2531 struct target_semid_ds *target_sd;
2532
2533 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2534 return -TARGET_EFAULT;
2535 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2536 return -TARGET_EFAULT;
2537 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2538 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2539 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2540 unlock_user_struct(target_sd, target_addr, 1);
2541 return 0;
2542 }
2543
2544 struct target_seminfo {
2545 int semmap;
2546 int semmni;
2547 int semmns;
2548 int semmnu;
2549 int semmsl;
2550 int semopm;
2551 int semume;
2552 int semusz;
2553 int semvmx;
2554 int semaem;
2555 };
2556
2557 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2558 struct seminfo *host_seminfo)
2559 {
2560 struct target_seminfo *target_seminfo;
2561 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2562 return -TARGET_EFAULT;
2563 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2564 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2565 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2566 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2567 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2568 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2569 __put_user(host_seminfo->semume, &target_seminfo->semume);
2570 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2571 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2572 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2573 unlock_user_struct(target_seminfo, target_addr, 1);
2574 return 0;
2575 }
2576
2577 union semun {
2578 int val;
2579 struct semid_ds *buf;
2580 unsigned short *array;
2581 struct seminfo *__buf;
2582 };
2583
2584 union target_semun {
2585 int val;
2586 abi_ulong buf;
2587 abi_ulong array;
2588 abi_ulong __buf;
2589 };
2590
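/* Copy a guest array of unsigned short semaphore values into a freshly
 * allocated host array.  The number of semaphores is obtained from the
 * kernel with an IPC_STAT query on the semaphore set. */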
2591 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2592 abi_ulong target_addr)
2593 {
2594 int nsems;
2595 unsigned short *array;
2596 union semun semun;
2597 struct semid_ds semid_ds;
2598 int i, ret;
2599
2600 semun.buf = &semid_ds;
2601
2602 ret = semctl(semid, 0, IPC_STAT, semun);
2603 if (ret == -1)
2604 return get_errno(ret);
2605
2606 nsems = semid_ds.sem_nsems;
2607
2608 *host_array = malloc(nsems*sizeof(unsigned short));
if (!*host_array) {
return -TARGET_ENOMEM;
}
2609 array = lock_user(VERIFY_READ, target_addr,
2610 nsems*sizeof(unsigned short), 1);
2611 if (!array)
2612 return -TARGET_EFAULT;
2613
2614 for(i=0; i<nsems; i++) {
2615 __get_user((*host_array)[i], &array[i]);
2616 }
2617 unlock_user(array, target_addr, 0);
2618
2619 return 0;
2620 }
2621
2622 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2623 unsigned short **host_array)
2624 {
2625 int nsems;
2626 unsigned short *array;
2627 union semun semun;
2628 struct semid_ds semid_ds;
2629 int i, ret;
2630
2631 semun.buf = &semid_ds;
2632
2633 ret = semctl(semid, 0, IPC_STAT, semun);
2634 if (ret == -1)
2635 return get_errno(ret);
2636
2637 nsems = semid_ds.sem_nsems;
2638
2639 array = lock_user(VERIFY_WRITE, target_addr,
2640 nsems*sizeof(unsigned short), 0);
2641 if (!array)
2642 return -TARGET_EFAULT;
2643
2644 for(i=0; i<nsems; i++) {
2645 __put_user((*host_array)[i], &array[i]);
2646 }
2647 free(*host_array);
2648 unlock_user(array, target_addr, 1);
2649
2650 return 0;
2651 }
2652
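/* do_semctl(): dispatch semctl commands, converting the semun argument
 * between guest and host layouts as each command requires. */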
2653 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2654 union target_semun target_su)
2655 {
2656 union semun arg;
2657 struct semid_ds dsarg;
2658 unsigned short *array = NULL;
2659 struct seminfo seminfo;
2660 abi_long ret = -TARGET_EINVAL;
2661 abi_long err;
2662 cmd &= 0xff;
2663
2664 switch( cmd ) {
2665 case GETVAL:
2666 case SETVAL:
2667 arg.val = tswap32(target_su.val);
2668 ret = get_errno(semctl(semid, semnum, cmd, arg));
2669 target_su.val = tswap32(arg.val);
2670 break;
2671 case GETALL:
2672 case SETALL:
2673 err = target_to_host_semarray(semid, &array, target_su.array);
2674 if (err)
2675 return err;
2676 arg.array = array;
2677 ret = get_errno(semctl(semid, semnum, cmd, arg));
2678 err = host_to_target_semarray(semid, target_su.array, &array);
2679 if (err)
2680 return err;
2681 break;
2682 case IPC_STAT:
2683 case IPC_SET:
2684 case SEM_STAT:
2685 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2686 if (err)
2687 return err;
2688 arg.buf = &dsarg;
2689 ret = get_errno(semctl(semid, semnum, cmd, arg));
2690 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2691 if (err)
2692 return err;
2693 break;
2694 case IPC_INFO:
2695 case SEM_INFO:
2696 arg.__buf = &seminfo;
2697 ret = get_errno(semctl(semid, semnum, cmd, arg));
2698 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2699 if (err)
2700 return err;
2701 break;
2702 case IPC_RMID:
2703 case GETPID:
2704 case GETNCNT:
2705 case GETZCNT:
2706 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2707 break;
2708 }
2709
2710 return ret;
2711 }
2712
2713 struct target_sembuf {
2714 unsigned short sem_num;
2715 short sem_op;
2716 short sem_flg;
2717 };
2718
2719 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2720 abi_ulong target_addr,
2721 unsigned nsops)
2722 {
2723 struct target_sembuf *target_sembuf;
2724 int i;
2725
2726 target_sembuf = lock_user(VERIFY_READ, target_addr,
2727 nsops*sizeof(struct target_sembuf), 1);
2728 if (!target_sembuf)
2729 return -TARGET_EFAULT;
2730
2731 for(i=0; i<nsops; i++) {
2732 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2733 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2734 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2735 }
2736
2737 unlock_user(target_sembuf, target_addr, 0);
2738
2739 return 0;
2740 }
2741
2742 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2743 {
2744 struct sembuf sops[nsops];
2745
2746 if (target_to_host_sembuf(sops, ptr, nsops))
2747 return -TARGET_EFAULT;
2748
2749 return get_errno(semop(semid, sops, nsops));
2750 }
2751
2752 struct target_msqid_ds
2753 {
2754 struct target_ipc_perm msg_perm;
2755 abi_ulong msg_stime;
2756 #if TARGET_ABI_BITS == 32
2757 abi_ulong __unused1;
2758 #endif
2759 abi_ulong msg_rtime;
2760 #if TARGET_ABI_BITS == 32
2761 abi_ulong __unused2;
2762 #endif
2763 abi_ulong msg_ctime;
2764 #if TARGET_ABI_BITS == 32
2765 abi_ulong __unused3;
2766 #endif
2767 abi_ulong __msg_cbytes;
2768 abi_ulong msg_qnum;
2769 abi_ulong msg_qbytes;
2770 abi_ulong msg_lspid;
2771 abi_ulong msg_lrpid;
2772 abi_ulong __unused4;
2773 abi_ulong __unused5;
2774 };
2775
2776 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2777 abi_ulong target_addr)
2778 {
2779 struct target_msqid_ds *target_md;
2780
2781 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2782 return -TARGET_EFAULT;
2783 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2784 return -TARGET_EFAULT;
2785 host_md->msg_stime = tswapal(target_md->msg_stime);
2786 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2787 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2788 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2789 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2790 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2791 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2792 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2793 unlock_user_struct(target_md, target_addr, 0);
2794 return 0;
2795 }
2796
2797 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2798 struct msqid_ds *host_md)
2799 {
2800 struct target_msqid_ds *target_md;
2801
2802 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2803 return -TARGET_EFAULT;
2804 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2805 return -TARGET_EFAULT;
2806 target_md->msg_stime = tswapal(host_md->msg_stime);
2807 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2808 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2809 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2810 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2811 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2812 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2813 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2814 unlock_user_struct(target_md, target_addr, 1);
2815 return 0;
2816 }
2817
2818 struct target_msginfo {
2819 int msgpool;
2820 int msgmap;
2821 int msgmax;
2822 int msgmnb;
2823 int msgmni;
2824 int msgssz;
2825 int msgtql;
2826 unsigned short int msgseg;
2827 };
2828
2829 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2830 struct msginfo *host_msginfo)
2831 {
2832 struct target_msginfo *target_msginfo;
2833 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2834 return -TARGET_EFAULT;
2835 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2836 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2837 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2838 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2839 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2840 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2841 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2842 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2843 unlock_user_struct(target_msginfo, target_addr, 1);
2844 return 0;
2845 }
2846
2847 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2848 {
2849 struct msqid_ds dsarg;
2850 struct msginfo msginfo;
2851 abi_long ret = -TARGET_EINVAL;
2852
2853 cmd &= 0xff;
2854
2855 switch (cmd) {
2856 case IPC_STAT:
2857 case IPC_SET:
2858 case MSG_STAT:
2859 if (target_to_host_msqid_ds(&dsarg,ptr))
2860 return -TARGET_EFAULT;
2861 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2862 if (host_to_target_msqid_ds(ptr,&dsarg))
2863 return -TARGET_EFAULT;
2864 break;
2865 case IPC_RMID:
2866 ret = get_errno(msgctl(msgid, cmd, NULL));
2867 break;
2868 case IPC_INFO:
2869 case MSG_INFO:
2870 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2871 if (host_to_target_msginfo(ptr, &msginfo))
2872 return -TARGET_EFAULT;
2873 break;
2874 }
2875
2876 return ret;
2877 }
2878
2879 struct target_msgbuf {
2880 abi_long mtype;
2881 char mtext[1];
2882 };
2883
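/* do_msgsnd()/do_msgrcv(): SysV message queue send/receive.  The guest
 * msgbuf is copied through a host side buffer because the target mtype
 * field is an abi_long while the host expects a long. */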
2884 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2885 unsigned int msgsz, int msgflg)
2886 {
2887 struct target_msgbuf *target_mb;
2888 struct msgbuf *host_mb;
2889 abi_long ret = 0;
2890
2891 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2892 return -TARGET_EFAULT;
2893 host_mb = malloc(msgsz+sizeof(long));
if (!host_mb) {
unlock_user_struct(target_mb, msgp, 0);
return -TARGET_ENOMEM;
}
2894 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2895 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2896 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2897 free(host_mb);
2898 unlock_user_struct(target_mb, msgp, 0);
2899
2900 return ret;
2901 }
2902
2903 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2904 unsigned int msgsz, abi_long msgtyp,
2905 int msgflg)
2906 {
2907 struct target_msgbuf *target_mb;
2908 char *target_mtext;
2909 struct msgbuf *host_mb;
2910 abi_long ret = 0;
2911
2912 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2913 return -TARGET_EFAULT;
2914
2915 host_mb = g_malloc(msgsz+sizeof(long));
2916 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2917
2918 if (ret > 0) {
2919 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2920 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2921 if (!target_mtext) {
2922 ret = -TARGET_EFAULT;
2923 goto end;
2924 }
2925 memcpy(target_mb->mtext, host_mb->mtext, ret);
2926 unlock_user(target_mtext, target_mtext_addr, ret);
2927 }
2928
2929 target_mb->mtype = tswapal(host_mb->mtype);
2930
2931 end:
2932 if (target_mb)
2933 unlock_user_struct(target_mb, msgp, 1);
2934 g_free(host_mb);
2935 return ret;
2936 }
2937
2938 struct target_shmid_ds
2939 {
2940 struct target_ipc_perm shm_perm;
2941 abi_ulong shm_segsz;
2942 abi_ulong shm_atime;
2943 #if TARGET_ABI_BITS == 32
2944 abi_ulong __unused1;
2945 #endif
2946 abi_ulong shm_dtime;
2947 #if TARGET_ABI_BITS == 32
2948 abi_ulong __unused2;
2949 #endif
2950 abi_ulong shm_ctime;
2951 #if TARGET_ABI_BITS == 32
2952 abi_ulong __unused3;
2953 #endif
2954 int shm_cpid;
2955 int shm_lpid;
2956 abi_ulong shm_nattch;
2957 unsigned long int __unused4;
2958 unsigned long int __unused5;
2959 };
2960
2961 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2962 abi_ulong target_addr)
2963 {
2964 struct target_shmid_ds *target_sd;
2965
2966 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2967 return -TARGET_EFAULT;
2968 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2969 return -TARGET_EFAULT;
2970 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2971 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2972 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2973 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2974 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2975 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2976 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2977 unlock_user_struct(target_sd, target_addr, 0);
2978 return 0;
2979 }
2980
2981 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2982 struct shmid_ds *host_sd)
2983 {
2984 struct target_shmid_ds *target_sd;
2985
2986 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2987 return -TARGET_EFAULT;
2988 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2989 return -TARGET_EFAULT;
2990 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2991 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2992 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2993 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2994 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2995 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2996 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2997 unlock_user_struct(target_sd, target_addr, 1);
2998 return 0;
2999 }
3000
3001 struct target_shminfo {
3002 abi_ulong shmmax;
3003 abi_ulong shmmin;
3004 abi_ulong shmmni;
3005 abi_ulong shmseg;
3006 abi_ulong shmall;
3007 };
3008
3009 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3010 struct shminfo *host_shminfo)
3011 {
3012 struct target_shminfo *target_shminfo;
3013 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3014 return -TARGET_EFAULT;
3015 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3016 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3017 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3018 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3019 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3020 unlock_user_struct(target_shminfo, target_addr, 1);
3021 return 0;
3022 }
3023
3024 struct target_shm_info {
3025 int used_ids;
3026 abi_ulong shm_tot;
3027 abi_ulong shm_rss;
3028 abi_ulong shm_swp;
3029 abi_ulong swap_attempts;
3030 abi_ulong swap_successes;
3031 };
3032
3033 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3034 struct shm_info *host_shm_info)
3035 {
3036 struct target_shm_info *target_shm_info;
3037 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3038 return -TARGET_EFAULT;
3039 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3040 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3041 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3042 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3043 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3044 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3045 unlock_user_struct(target_shm_info, target_addr, 1);
3046 return 0;
3047 }
3048
3049 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3050 {
3051 struct shmid_ds dsarg;
3052 struct shminfo shminfo;
3053 struct shm_info shm_info;
3054 abi_long ret = -TARGET_EINVAL;
3055
3056 cmd &= 0xff;
3057
3058 switch(cmd) {
3059 case IPC_STAT:
3060 case IPC_SET:
3061 case SHM_STAT:
3062 if (target_to_host_shmid_ds(&dsarg, buf))
3063 return -TARGET_EFAULT;
3064 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3065 if (host_to_target_shmid_ds(buf, &dsarg))
3066 return -TARGET_EFAULT;
3067 break;
3068 case IPC_INFO:
3069 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3070 if (host_to_target_shminfo(buf, &shminfo))
3071 return -TARGET_EFAULT;
3072 break;
3073 case SHM_INFO:
3074 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3075 if (host_to_target_shm_info(buf, &shm_info))
3076 return -TARGET_EFAULT;
3077 break;
3078 case IPC_RMID:
3079 case SHM_LOCK:
3080 case SHM_UNLOCK:
3081 ret = get_errno(shmctl(shmid, cmd, NULL));
3082 break;
3083 }
3084
3085 return ret;
3086 }
3087
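/* do_shmat(): attach a SysV shared memory segment into the guest address
 * space.  If the guest did not supply an address, pick one with
 * mmap_find_vma(); then mark the guest pages valid and readable (and
 * writable unless SHM_RDONLY) and record the region in shm_regions[]. */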
3088 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3089 {
3090 abi_long raddr;
3091 void *host_raddr;
3092 struct shmid_ds shm_info;
3093 int i,ret;
3094
3095 /* find out the length of the shared memory segment */
3096 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3097 if (is_error(ret)) {
3098 /* can't get length, bail out */
3099 return ret;
3100 }
3101
3102 mmap_lock();
3103
3104 if (shmaddr)
3105 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3106 else {
3107 abi_ulong mmap_start;
3108
3109 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3110
3111 if (mmap_start == -1) {
3112 errno = ENOMEM;
3113 host_raddr = (void *)-1;
3114 } else
3115 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3116 }
3117
3118 if (host_raddr == (void *)-1) {
3119 mmap_unlock();
3120 return get_errno((long)host_raddr);
3121 }
3122 raddr=h2g((unsigned long)host_raddr);
3123
3124 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3125 PAGE_VALID | PAGE_READ |
3126 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3127
3128 for (i = 0; i < N_SHM_REGIONS; i++) {
3129 if (shm_regions[i].start == 0) {
3130 shm_regions[i].start = raddr;
3131 shm_regions[i].size = shm_info.shm_segsz;
3132 break;
3133 }
3134 }
3135
3136 mmap_unlock();
3137 return raddr;
3138
3139 }
3140
3141 static inline abi_long do_shmdt(abi_ulong shmaddr)
3142 {
3143 int i;
3144
3145 for (i = 0; i < N_SHM_REGIONS; ++i) {
3146 if (shm_regions[i].start == shmaddr) {
3147 shm_regions[i].start = 0;
3148 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3149 break;
3150 }
3151 }
3152
3153 return get_errno(shmdt(g2h(shmaddr)));
3154 }
3155
3156 #ifdef TARGET_NR_ipc
3157 /* ??? This only works with linear mappings. */
3158 /* do_ipc() must return target values and target errnos. */
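/* do_ipc() demultiplexes the old-style ipc() syscall: the low 16 bits of
 * 'call' select the operation and the upper 16 bits carry the ABI
 * version used by some of the sub-calls. */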
3159 static abi_long do_ipc(unsigned int call, int first,
3160 int second, int third,
3161 abi_long ptr, abi_long fifth)
3162 {
3163 int version;
3164 abi_long ret = 0;
3165
3166 version = call >> 16;
3167 call &= 0xffff;
3168
3169 switch (call) {
3170 case IPCOP_semop:
3171 ret = do_semop(first, ptr, second);
3172 break;
3173
3174 case IPCOP_semget:
3175 ret = get_errno(semget(first, second, third));
3176 break;
3177
3178 case IPCOP_semctl:
3179 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3180 break;
3181
3182 case IPCOP_msgget:
3183 ret = get_errno(msgget(first, second));
3184 break;
3185
3186 case IPCOP_msgsnd:
3187 ret = do_msgsnd(first, ptr, second, third);
3188 break;
3189
3190 case IPCOP_msgctl:
3191 ret = do_msgctl(first, second, ptr);
3192 break;
3193
3194 case IPCOP_msgrcv:
3195 switch (version) {
3196 case 0:
3197 {
3198 struct target_ipc_kludge {
3199 abi_long msgp;
3200 abi_long msgtyp;
3201 } *tmp;
3202
3203 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3204 ret = -TARGET_EFAULT;
3205 break;
3206 }
3207
3208 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3209
3210 unlock_user_struct(tmp, ptr, 0);
3211 break;
3212 }
3213 default:
3214 ret = do_msgrcv(first, ptr, second, fifth, third);
3215 }
3216 break;
3217
3218 case IPCOP_shmat:
3219 switch (version) {
3220 default:
3221 {
3222 abi_ulong raddr;
3223 raddr = do_shmat(first, ptr, second);
3224 if (is_error(raddr))
3225 return get_errno(raddr);
3226 if (put_user_ual(raddr, third))
3227 return -TARGET_EFAULT;
3228 break;
3229 }
3230 case 1:
3231 ret = -TARGET_EINVAL;
3232 break;
3233 }
3234 break;
3235 case IPCOP_shmdt:
3236 ret = do_shmdt(ptr);
3237 break;
3238
3239 case IPCOP_shmget:
3240 /* IPC_* flag values are the same on all linux platforms */
3241 ret = get_errno(shmget(first, second, third));
3242 break;
3243
3244 /* IPC_* and SHM_* command values are the same on all linux platforms */
3245 case IPCOP_shmctl:
3246 ret = do_shmctl(first, second, third);
3247 break;
3248 default:
3249 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3250 ret = -TARGET_ENOSYS;
3251 break;
3252 }
3253 return ret;
3254 }
3255 #endif
3256
3257 /* kernel structure types definitions */
3258
3259 #define STRUCT(name, ...) STRUCT_ ## name,
3260 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3261 enum {
3262 #include "syscall_types.h"
3263 };
3264 #undef STRUCT
3265 #undef STRUCT_SPECIAL
3266
3267 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3268 #define STRUCT_SPECIAL(name)
3269 #include "syscall_types.h"
3270 #undef STRUCT
3271 #undef STRUCT_SPECIAL
3272
3273 typedef struct IOCTLEntry IOCTLEntry;
3274
3275 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3276 int fd, abi_long cmd, abi_long arg);
3277
3278 struct IOCTLEntry {
3279 unsigned int target_cmd;
3280 unsigned int host_cmd;
3281 const char *name;
3282 int access;
3283 do_ioctl_fn *do_ioctl;
3284 const argtype arg_type[5];
3285 };
3286
3287 #define IOC_R 0x0001
3288 #define IOC_W 0x0002
3289 #define IOC_RW (IOC_R | IOC_W)
3290
3291 #define MAX_STRUCT_SIZE 4096
3292
3293 #ifdef CONFIG_FIEMAP
3294 /* So fiemap access checks don't overflow on 32 bit systems.
3295 * This is very slightly smaller than the limit imposed by
3296 * the underlying kernel.
3297 */
3298 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3299 / sizeof(struct fiemap_extent))
3300
3301 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3302 int fd, abi_long cmd, abi_long arg)
3303 {
3304 /* The parameter for this ioctl is a struct fiemap followed
3305 * by an array of struct fiemap_extent whose size is set
3306 * in fiemap->fm_extent_count. The array is filled in by the
3307 * ioctl.
3308 */
3309 int target_size_in, target_size_out;
3310 struct fiemap *fm;
3311 const argtype *arg_type = ie->arg_type;
3312 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3313 void *argptr, *p;
3314 abi_long ret;
3315 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3316 uint32_t outbufsz;
3317 int free_fm = 0;
3318
3319 assert(arg_type[0] == TYPE_PTR);
3320 assert(ie->access == IOC_RW);
3321 arg_type++;
3322 target_size_in = thunk_type_size(arg_type, 0);
3323 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3324 if (!argptr) {
3325 return -TARGET_EFAULT;
3326 }
3327 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3328 unlock_user(argptr, arg, 0);
3329 fm = (struct fiemap *)buf_temp;
3330 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3331 return -TARGET_EINVAL;
3332 }
3333
3334 outbufsz = sizeof (*fm) +
3335 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3336
3337 if (outbufsz > MAX_STRUCT_SIZE) {
3338 /* We can't fit all the extents into the fixed size buffer.
3339 * Allocate one that is large enough and use it instead.
3340 */
3341 fm = malloc(outbufsz);
3342 if (!fm) {
3343 return -TARGET_ENOMEM;
3344 }
3345 memcpy(fm, buf_temp, sizeof(struct fiemap));
3346 free_fm = 1;
3347 }
3348 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3349 if (!is_error(ret)) {
3350 target_size_out = target_size_in;
3351 /* An extent_count of 0 means we were only counting the extents
3352 * so there are no structs to copy
3353 */
3354 if (fm->fm_extent_count != 0) {
3355 target_size_out += fm->fm_mapped_extents * extent_size;
3356 }
3357 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3358 if (!argptr) {
3359 ret = -TARGET_EFAULT;
3360 } else {
3361 /* Convert the struct fiemap */
3362 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3363 if (fm->fm_extent_count != 0) {
3364 p = argptr + target_size_in;
3365 /* ...and then all the struct fiemap_extents */
3366 for (i = 0; i < fm->fm_mapped_extents; i++) {
3367 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3368 THUNK_TARGET);
3369 p += extent_size;
3370 }
3371 }
3372 unlock_user(argptr, arg, target_size_out);
3373 }
3374 }
3375 if (free_fm) {
3376 free(fm);
3377 }
3378 return ret;
3379 }
3380 #endif
3381
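/* Handler for struct ifconf based ioctls (e.g. SIOCGIFCONF): the guest
 * ifc_buf points at an array of target struct ifreq entries, which may
 * differ in size from the host's, so the lengths are scaled between the
 * two layouts and the resulting ifreq array is converted entry by entry. */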
3382 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3383 int fd, abi_long cmd, abi_long arg)
3384 {
3385 const argtype *arg_type = ie->arg_type;
3386 int target_size;
3387 void *argptr;
3388 int ret;
3389 struct ifconf *host_ifconf;
3390 uint32_t outbufsz;
3391 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3392 int target_ifreq_size;
3393 int nb_ifreq;
3394 int free_buf = 0;
3395 int i;
3396 int target_ifc_len;
3397 abi_long target_ifc_buf;
3398 int host_ifc_len;
3399 char *host_ifc_buf;
3400
3401 assert(arg_type[0] == TYPE_PTR);
3402 assert(ie->access == IOC_RW);
3403
3404 arg_type++;
3405 target_size = thunk_type_size(arg_type, 0);
3406
3407 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3408 if (!argptr)
3409 return -TARGET_EFAULT;
3410 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3411 unlock_user(argptr, arg, 0);
3412
3413 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3414 target_ifc_len = host_ifconf->ifc_len;
3415 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3416
3417 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3418 nb_ifreq = target_ifc_len / target_ifreq_size;
3419 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3420
3421 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3422 if (outbufsz > MAX_STRUCT_SIZE) {
3423 /* We can't fit all the ifreq entries into the fixed size buffer.
3424 * Allocate one that is large enough and use it instead.
3425 */
3426 host_ifconf = malloc(outbufsz);
3427 if (!host_ifconf) {
3428 return -TARGET_ENOMEM;
3429 }
3430 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3431 free_buf = 1;
3432 }
3433 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3434
3435 host_ifconf->ifc_len = host_ifc_len;
3436 host_ifconf->ifc_buf = host_ifc_buf;
3437
3438 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3439 if (!is_error(ret)) {
3440 /* convert host ifc_len to target ifc_len */
3441
3442 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3443 target_ifc_len = nb_ifreq * target_ifreq_size;
3444 host_ifconf->ifc_len = target_ifc_len;
3445
3446 /* restore target ifc_buf */
3447
3448 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3449
3450 /* copy struct ifconf to target user */
3451
3452 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3453 if (!argptr)
3454 return -TARGET_EFAULT;
3455 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3456 unlock_user(argptr, arg, target_size);
3457
3458 /* copy ifreq[] to target user */
3459
3460 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
if (!argptr)
return -TARGET_EFAULT;
3461 for (i = 0; i < nb_ifreq ; i++) {
3462 thunk_convert(argptr + i * target_ifreq_size,
3463 host_ifc_buf + i * sizeof(struct ifreq),
3464 ifreq_arg_type, THUNK_TARGET);
3465 }
3466 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3467 }
3468
3469 if (free_buf) {
3470 free(host_ifconf);
3471 }
3472
3473 return ret;
3474 }
3475
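/* Device-mapper ioctls carry a variable sized payload: a struct dm_ioctl
 * header followed by data_size - data_start bytes of command specific
 * data.  The header is converted with the generic thunk machinery while
 * the payload is converted by hand per command below. */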
3476 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3477 abi_long cmd, abi_long arg)
3478 {
3479 void *argptr;
3480 struct dm_ioctl *host_dm;
3481 abi_long guest_data;
3482 uint32_t guest_data_size;
3483 int target_size;
3484 const argtype *arg_type = ie->arg_type;
3485 abi_long ret;
3486 void *big_buf = NULL;
3487 char *host_data;
3488
3489 arg_type++;
3490 target_size = thunk_type_size(arg_type, 0);
3491 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3492 if (!argptr) {
3493 ret = -TARGET_EFAULT;
3494 goto out;
3495 }
3496 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3497 unlock_user(argptr, arg, 0);
3498
3499 /* buf_temp is too small, so fetch things into a bigger buffer */
3500 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3501 memcpy(big_buf, buf_temp, target_size);
3502 buf_temp = big_buf;
3503 host_dm = big_buf;
3504
3505 guest_data = arg + host_dm->data_start;
3506 if ((guest_data - arg) < 0) {
3507 ret = -TARGET_EINVAL;
3508 goto out;
3509 }
3510 guest_data_size = host_dm->data_size - host_dm->data_start;
3511 host_data = (char*)host_dm + host_dm->data_start;
3512
3513 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3514 switch (ie->host_cmd) {
3515 case DM_REMOVE_ALL:
3516 case DM_LIST_DEVICES:
3517 case DM_DEV_CREATE:
3518 case DM_DEV_REMOVE:
3519 case DM_DEV_SUSPEND:
3520 case DM_DEV_STATUS:
3521 case DM_DEV_WAIT:
3522 case DM_TABLE_STATUS:
3523 case DM_TABLE_CLEAR:
3524 case DM_TABLE_DEPS:
3525 case DM_LIST_VERSIONS:
3526 /* no input data */
3527 break;
3528 case DM_DEV_RENAME:
3529 case DM_DEV_SET_GEOMETRY:
3530 /* data contains only strings */
3531 memcpy(host_data, argptr, guest_data_size);
3532 break;
3533 case DM_TARGET_MSG:
3534 memcpy(host_data, argptr, guest_data_size);
3535 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3536 break;
3537 case DM_TABLE_LOAD:
3538 {
3539 void *gspec = argptr;
3540 void *cur_data = host_data;
3541 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3542 int spec_size = thunk_type_size(arg_type, 0);
3543 int i;
3544
3545 for (i = 0; i < host_dm->target_count; i++) {
3546 struct dm_target_spec *spec = cur_data;
3547 uint32_t next;
3548 int slen;
3549
3550 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3551 slen = strlen((char*)gspec + spec_size) + 1;
3552 next = spec->next;
3553 spec->next = sizeof(*spec) + slen;
3554 strcpy((char*)&spec[1], gspec + spec_size);
3555 gspec += next;
3556 cur_data += spec->next;
3557 }
3558 break;
3559 }
3560 default:
3561 ret = -TARGET_EINVAL;
3562 goto out;
3563 }
3564 unlock_user(argptr, guest_data, 0);
3565
3566 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3567 if (!is_error(ret)) {
3568 guest_data = arg + host_dm->data_start;
3569 guest_data_size = host_dm->data_size - host_dm->data_start;
3570 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3571 switch (ie->host_cmd) {
3572 case DM_REMOVE_ALL:
3573 case DM_DEV_CREATE:
3574 case DM_DEV_REMOVE:
3575 case DM_DEV_RENAME:
3576 case DM_DEV_SUSPEND:
3577 case DM_DEV_STATUS:
3578 case DM_TABLE_LOAD:
3579 case DM_TABLE_CLEAR:
3580 case DM_TARGET_MSG:
3581 case DM_DEV_SET_GEOMETRY:
3582 /* no return data */
3583 break;
3584 case DM_LIST_DEVICES:
3585 {
3586 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3587 uint32_t remaining_data = guest_data_size;
3588 void *cur_data = argptr;
3589 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3590 int nl_size = 12; /* can't use thunk_size due to alignment */
3591
3592 while (1) {
3593 uint32_t next = nl->next;
3594 if (next) {
3595 nl->next = nl_size + (strlen(nl->name) + 1);
3596 }
3597 if (remaining_data < nl->next) {
3598 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3599 break;
3600 }
3601 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3602 strcpy(cur_data + nl_size, nl->name);
3603 cur_data += nl->next;
3604 remaining_data -= nl->next;
3605 if (!next) {
3606 break;
3607 }
3608 nl = (void*)nl + next;
3609 }
3610 break;
3611 }
3612 case DM_DEV_WAIT:
3613 case DM_TABLE_STATUS:
3614 {
3615 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3616 void *cur_data = argptr;
3617 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3618 int spec_size = thunk_type_size(arg_type, 0);
3619 int i;
3620
3621 for (i = 0; i < host_dm->target_count; i++) {
3622 uint32_t next = spec->next;
3623 int slen = strlen((char*)&spec[1]) + 1;
3624 spec->next = (cur_data - argptr) + spec_size + slen;
3625 if (guest_data_size < spec->next) {
3626 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3627 break;
3628 }
3629 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3630 strcpy(cur_data + spec_size, (char*)&spec[1]);
3631 cur_data = argptr + spec->next;
3632 spec = (void*)host_dm + host_dm->data_start + next;
3633 }
3634 break;
3635 }
3636 case DM_TABLE_DEPS:
3637 {
3638 void *hdata = (void*)host_dm + host_dm->data_start;
3639 int count = *(uint32_t*)hdata;
3640 uint64_t *hdev = hdata + 8;
3641 uint64_t *gdev = argptr + 8;
3642 int i;
3643
3644 *(uint32_t*)argptr = tswap32(count);
3645 for (i = 0; i < count; i++) {
3646 *gdev = tswap64(*hdev);
3647 gdev++;
3648 hdev++;
3649 }
3650 break;
3651 }
3652 case DM_LIST_VERSIONS:
3653 {
3654 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3655 uint32_t remaining_data = guest_data_size;
3656 void *cur_data = argptr;
3657 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3658 int vers_size = thunk_type_size(arg_type, 0);
3659
3660 while (1) {
3661 uint32_t next = vers->next;
3662 if (next) {
3663 vers->next = vers_size + (strlen(vers->name) + 1);
3664 }
3665 if (remaining_data < vers->next) {
3666 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3667 break;
3668 }
3669 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3670 strcpy(cur_data + vers_size, vers->name);
3671 cur_data += vers->next;
3672 remaining_data -= vers->next;
3673 if (!next) {
3674 break;
3675 }
3676 vers = (void*)vers + next;
3677 }
3678 break;
3679 }
3680 default:
3681 ret = -TARGET_EINVAL;
3682 goto out;
3683 }
3684 unlock_user(argptr, guest_data, guest_data_size);
3685
3686 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3687 if (!argptr) {
3688 ret = -TARGET_EFAULT;
3689 goto out;
3690 }
3691 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3692 unlock_user(argptr, arg, target_size);
3693 }
3694 out:
3695 g_free(big_buf);
3696 return ret;
3697 }
3698
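/* Table of supported ioctls, generated from ioctls.h.  Entries with a
 * do_ioctl callback perform command specific conversion; the rest are
 * converted generically by do_ioctl() from their argtype description. */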
3699 static IOCTLEntry ioctl_entries[] = {
3700 #define IOCTL(cmd, access, ...) \
3701 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3702 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3703 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3704 #include "ioctls.h"
3705 { 0, 0, },
3706 };
3707
3708 /* ??? Implement proper locking for ioctls. */
3709 /* do_ioctl() Must return target values and target errnos. */
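/* Look the command up in ioctl_entries[], then either hand off to a
 * command specific helper or convert the argument generically according
 * to its argtype description and access mode (IOC_R/IOC_W/IOC_RW). */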
3710 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3711 {
3712 const IOCTLEntry *ie;
3713 const argtype *arg_type;
3714 abi_long ret;
3715 uint8_t buf_temp[MAX_STRUCT_SIZE];
3716 int target_size;
3717 void *argptr;
3718
3719 ie = ioctl_entries;
3720 for(;;) {
3721 if (ie->target_cmd == 0) {
3722 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3723 return -TARGET_ENOSYS;
3724 }
3725 if (ie->target_cmd == cmd)
3726 break;
3727 ie++;
3728 }
3729 arg_type = ie->arg_type;
3730 #if defined(DEBUG)
3731 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3732 #endif
3733 if (ie->do_ioctl) {
3734 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3735 }
3736
3737 switch(arg_type[0]) {
3738 case TYPE_NULL:
3739 /* no argument */
3740 ret = get_errno(ioctl(fd, ie->host_cmd));
3741 break;
3742 case TYPE_PTRVOID:
3743 case TYPE_INT:
3744 /* int argument */
3745 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3746 break;
3747 case TYPE_PTR:
3748 arg_type++;
3749 target_size = thunk_type_size(arg_type, 0);
3750 switch(ie->access) {
3751 case IOC_R:
3752 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3753 if (!is_error(ret)) {
3754 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3755 if (!argptr)
3756 return -TARGET_EFAULT;
3757 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3758 unlock_user(argptr, arg, target_size);
3759 }
3760 break;
3761 case IOC_W:
3762 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3763 if (!argptr)
3764 return -TARGET_EFAULT;
3765 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3766 unlock_user(argptr, arg, 0);
3767 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3768 break;
3769 default:
3770 case IOC_RW:
3771 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3772 if (!argptr)
3773 return -TARGET_EFAULT;
3774 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3775 unlock_user(argptr, arg, 0);
3776 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3777 if (!is_error(ret)) {
3778 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3779 if (!argptr)
3780 return -TARGET_EFAULT;
3781 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3782 unlock_user(argptr, arg, target_size);
3783 }
3784 break;
3785 }
3786 break;
3787 default:
3788 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3789 (long)cmd, arg_type[0]);
3790 ret = -TARGET_ENOSYS;
3791 break;
3792 }
3793 return ret;
3794 }
3795
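/* Termios flag translation tables: each entry maps a (mask, value) pair
 * in the target flag word to the corresponding (mask, value) pair on the
 * host, for use with target_to_host_bitmask() and its inverse. */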
3796 static const bitmask_transtbl iflag_tbl[] = {
3797 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3798 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3799 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3800 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3801 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3802 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3803 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3804 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3805 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3806 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3807 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3808 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3809 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3810 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3811 { 0, 0, 0, 0 }
3812 };
3813
3814 static const bitmask_transtbl oflag_tbl[] = {
3815 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3816 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3817 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3818 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3819 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3820 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3821 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3822 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3823 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3824 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3825 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3826 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3827 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3828 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3829 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3830 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3831 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3832 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3833 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3834 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3835 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3836 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3837 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3838 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3839 { 0, 0, 0, 0 }
3840 };
3841
3842 static const bitmask_transtbl cflag_tbl[] = {
3843 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3844 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3845 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3846 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3847 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3848 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3849 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3850 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3851 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3852 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3853 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3854 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3855 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3856 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3857 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3858 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3859 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3860 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3861 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3862 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3863 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3864 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3865 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3866 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3867 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3868 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3869 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3870 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3871 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3872 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3873 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3874 { 0, 0, 0, 0 }
3875 };
3876
3877 static const bitmask_transtbl lflag_tbl[] = {
3878 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3879 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3880 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3881 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3882 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3883 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3884 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3885 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3886 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3887 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3888 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3889 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3890 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3891 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3892 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3893 { 0, 0, 0, 0 }
3894 };
3895
3896 static void target_to_host_termios (void *dst, const void *src)
3897 {
3898 struct host_termios *host = dst;
3899 const struct target_termios *target = src;
3900
3901 host->c_iflag =
3902 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3903 host->c_oflag =
3904 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3905 host->c_cflag =
3906 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3907 host->c_lflag =
3908 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3909 host->c_line = target->c_line;
3910
3911 memset(host->c_cc, 0, sizeof(host->c_cc));
3912 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3913 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3914 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3915 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3916 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3917 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3918 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3919 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3920 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3921 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3922 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3923 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3924 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3925 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3926 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3927 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3928 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3929 }
3930
3931 static void host_to_target_termios (void *dst, const void *src)
3932 {
3933 struct target_termios *target = dst;
3934 const struct host_termios *host = src;
3935
3936 target->c_iflag =
3937 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3938 target->c_oflag =
3939 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3940 target->c_cflag =
3941 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3942 target->c_lflag =
3943 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3944 target->c_line = host->c_line;
3945
3946 memset(target->c_cc, 0, sizeof(target->c_cc));
3947 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3948 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3949 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3950 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3951 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3952 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3953 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3954 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3955 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3956 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3957 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3958 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3959 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3960 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3961 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3962 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3963 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3964 }
3965
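/* Tie the two converters above into the thunk layer: this StructEntry describes
   how to convert a struct termios in either direction and is registered from
   syscall_types.h by syscall_init(), so ioctls that take a termios argument are
   converted automatically by do_ioctl(). */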
3966 static const StructEntry struct_termios_def = {
3967 .convert = { host_to_target_termios, target_to_host_termios },
3968 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3969 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3970 };
3971
3972 static bitmask_transtbl mmap_flags_tbl[] = {
3973 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3974 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3975 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3976 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3977 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3978 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3979 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3980 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3981 { 0, 0, 0, 0 }
3982 };
3983
3984 #if defined(TARGET_I386)
3985
3986 /* NOTE: there is really only one LDT shared by all threads */
3987 static uint8_t *ldt_table;
3988
3989 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3990 {
3991 int size;
3992 void *p;
3993
3994 if (!ldt_table)
3995 return 0;
3996 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3997 if (size > bytecount)
3998 size = bytecount;
3999 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4000 if (!p)
4001 return -TARGET_EFAULT;
4002 /* ??? Should this be byteswapped? */
4003 memcpy(p, ldt_table, size);
4004 unlock_user(p, ptr, size);
4005 return size;
4006 }
4007
4008 /* XXX: add locking support */
4009 static abi_long write_ldt(CPUX86State *env,
4010 abi_ulong ptr, unsigned long bytecount, int oldmode)
4011 {
4012 struct target_modify_ldt_ldt_s ldt_info;
4013 struct target_modify_ldt_ldt_s *target_ldt_info;
4014 int seg_32bit, contents, read_exec_only, limit_in_pages;
4015 int seg_not_present, useable, lm;
4016 uint32_t *lp, entry_1, entry_2;
4017
4018 if (bytecount != sizeof(ldt_info))
4019 return -TARGET_EINVAL;
4020 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4021 return -TARGET_EFAULT;
4022 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4023 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4024 ldt_info.limit = tswap32(target_ldt_info->limit);
4025 ldt_info.flags = tswap32(target_ldt_info->flags);
4026 unlock_user_struct(target_ldt_info, ptr, 0);
4027
4028 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4029 return -TARGET_EINVAL;
4030 seg_32bit = ldt_info.flags & 1;
4031 contents = (ldt_info.flags >> 1) & 3;
4032 read_exec_only = (ldt_info.flags >> 3) & 1;
4033 limit_in_pages = (ldt_info.flags >> 4) & 1;
4034 seg_not_present = (ldt_info.flags >> 5) & 1;
4035 useable = (ldt_info.flags >> 6) & 1;
4036 #ifdef TARGET_ABI32
4037 lm = 0;
4038 #else
4039 lm = (ldt_info.flags >> 7) & 1;
4040 #endif
4041 if (contents == 3) {
4042 if (oldmode)
4043 return -TARGET_EINVAL;
4044 if (seg_not_present == 0)
4045 return -TARGET_EINVAL;
4046 }
4047 /* allocate the LDT */
4048 if (!ldt_table) {
4049 env->ldt.base = target_mmap(0,
4050 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4051 PROT_READ|PROT_WRITE,
4052 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4053 if (env->ldt.base == -1)
4054 return -TARGET_ENOMEM;
4055 memset(g2h(env->ldt.base), 0,
4056 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4057 env->ldt.limit = 0xffff;
4058 ldt_table = g2h(env->ldt.base);
4059 }
4060
4061 /* NOTE: same code as Linux kernel */
4062 /* Allow LDTs to be cleared by the user. */
4063 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4064 if (oldmode ||
4065 (contents == 0 &&
4066 read_exec_only == 1 &&
4067 seg_32bit == 0 &&
4068 limit_in_pages == 0 &&
4069 seg_not_present == 1 &&
4070 useable == 0 )) {
4071 entry_1 = 0;
4072 entry_2 = 0;
4073 goto install;
4074 }
4075 }
4076
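/* Pack the fields into the two halves of an x86 segment descriptor:
   entry_1 holds limit[15:0] and base[15:0]; entry_2 holds base[23:16],
   the access bits, limit[19:16], the AVL/L/D/G bits and base[31:24].
   The constant 0x7000 sets the descriptor type (S=1) and DPL=3. */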
4077 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4078 (ldt_info.limit & 0x0ffff);
4079 entry_2 = (ldt_info.base_addr & 0xff000000) |
4080 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4081 (ldt_info.limit & 0xf0000) |
4082 ((read_exec_only ^ 1) << 9) |
4083 (contents << 10) |
4084 ((seg_not_present ^ 1) << 15) |
4085 (seg_32bit << 22) |
4086 (limit_in_pages << 23) |
4087 (lm << 21) |
4088 0x7000;
4089 if (!oldmode)
4090 entry_2 |= (useable << 20);
4091
4092 /* Install the new entry ... */
4093 install:
4094 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4095 lp[0] = tswap32(entry_1);
4096 lp[1] = tswap32(entry_2);
4097 return 0;
4098 }
4099
4100 /* specific and weird i386 syscalls */
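/* modify_ldt(2): func 0 reads the LDT, func 1 writes an entry with the old
   semantics (oldmode, no 'useable' bit) and func 0x11 with the current
   semantics; anything else returns -TARGET_ENOSYS. */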
4101 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4102 unsigned long bytecount)
4103 {
4104 abi_long ret;
4105
4106 switch (func) {
4107 case 0:
4108 ret = read_ldt(ptr, bytecount);
4109 break;
4110 case 1:
4111 ret = write_ldt(env, ptr, bytecount, 1);
4112 break;
4113 case 0x11:
4114 ret = write_ldt(env, ptr, bytecount, 0);
4115 break;
4116 default:
4117 ret = -TARGET_ENOSYS;
4118 break;
4119 }
4120 return ret;
4121 }
4122
4123 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4124 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4125 {
4126 uint64_t *gdt_table = g2h(env->gdt.base);
4127 struct target_modify_ldt_ldt_s ldt_info;
4128 struct target_modify_ldt_ldt_s *target_ldt_info;
4129 int seg_32bit, contents, read_exec_only, limit_in_pages;
4130 int seg_not_present, useable, lm;
4131 uint32_t *lp, entry_1, entry_2;
4132 int i;
4133
4134 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4135 if (!target_ldt_info)
4136 return -TARGET_EFAULT;
4137 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4138 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4139 ldt_info.limit = tswap32(target_ldt_info->limit);
4140 ldt_info.flags = tswap32(target_ldt_info->flags);
4141 if (ldt_info.entry_number == -1) {
4142 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4143 if (gdt_table[i] == 0) {
4144 ldt_info.entry_number = i;
4145 target_ldt_info->entry_number = tswap32(i);
4146 break;
4147 }
4148 }
4149 }
4150 unlock_user_struct(target_ldt_info, ptr, 1);
4151
4152 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4153 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4154 return -TARGET_EINVAL;
4155 seg_32bit = ldt_info.flags & 1;
4156 contents = (ldt_info.flags >> 1) & 3;
4157 read_exec_only = (ldt_info.flags >> 3) & 1;
4158 limit_in_pages = (ldt_info.flags >> 4) & 1;
4159 seg_not_present = (ldt_info.flags >> 5) & 1;
4160 useable = (ldt_info.flags >> 6) & 1;
4161 #ifdef TARGET_ABI32
4162 lm = 0;
4163 #else
4164 lm = (ldt_info.flags >> 7) & 1;
4165 #endif
4166
4167 if (contents == 3) {
4168 if (seg_not_present == 0)
4169 return -TARGET_EINVAL;
4170 }
4171
4172 /* NOTE: same code as Linux kernel */
4173 /* Allow LDTs to be cleared by the user. */
4174 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4175 if ((contents == 0 &&
4176 read_exec_only == 1 &&
4177 seg_32bit == 0 &&
4178 limit_in_pages == 0 &&
4179 seg_not_present == 1 &&
4180 useable == 0 )) {
4181 entry_1 = 0;
4182 entry_2 = 0;
4183 goto install;
4184 }
4185 }
4186
4187 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4188 (ldt_info.limit & 0x0ffff);
4189 entry_2 = (ldt_info.base_addr & 0xff000000) |
4190 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4191 (ldt_info.limit & 0xf0000) |
4192 ((read_exec_only ^ 1) << 9) |
4193 (contents << 10) |
4194 ((seg_not_present ^ 1) << 15) |
4195 (seg_32bit << 22) |
4196 (limit_in_pages << 23) |
4197 (useable << 20) |
4198 (lm << 21) |
4199 0x7000;
4200
4201 /* Install the new entry ... */
4202 install:
4203 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4204 lp[0] = tswap32(entry_1);
4205 lp[1] = tswap32(entry_2);
4206 return 0;
4207 }
4208
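/* Inverse of do_set_thread_area(): unpack the descriptor from the GDT back
   into the entry_number/base/limit/flags fields of the target structure. */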
4209 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4210 {
4211 struct target_modify_ldt_ldt_s *target_ldt_info;
4212 uint64_t *gdt_table = g2h(env->gdt.base);
4213 uint32_t base_addr, limit, flags;
4214 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4215 int seg_not_present, useable, lm;
4216 uint32_t *lp, entry_1, entry_2;
4217
4218 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4219 if (!target_ldt_info)
4220 return -TARGET_EFAULT;
4221 idx = tswap32(target_ldt_info->entry_number);
4222 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4223 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4224 unlock_user_struct(target_ldt_info, ptr, 1);
4225 return -TARGET_EINVAL;
4226 }
4227 lp = (uint32_t *)(gdt_table + idx);
4228 entry_1 = tswap32(lp[0]);
4229 entry_2 = tswap32(lp[1]);
4230
4231 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4232 contents = (entry_2 >> 10) & 3;
4233 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4234 seg_32bit = (entry_2 >> 22) & 1;
4235 limit_in_pages = (entry_2 >> 23) & 1;
4236 useable = (entry_2 >> 20) & 1;
4237 #ifdef TARGET_ABI32
4238 lm = 0;
4239 #else
4240 lm = (entry_2 >> 21) & 1;
4241 #endif
4242 flags = (seg_32bit << 0) | (contents << 1) |
4243 (read_exec_only << 3) | (limit_in_pages << 4) |
4244 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4245 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4246 base_addr = (entry_1 >> 16) |
4247 (entry_2 & 0xff000000) |
4248 ((entry_2 & 0xff) << 16);
4249 target_ldt_info->base_addr = tswapal(base_addr);
4250 target_ldt_info->limit = tswap32(limit);
4251 target_ldt_info->flags = tswap32(flags);
4252 unlock_user_struct(target_ldt_info, ptr, 1);
4253 return 0;
4254 }
4255 #endif /* TARGET_I386 && TARGET_ABI32 */
4256
4257 #ifndef TARGET_ABI32
4258 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4259 {
4260 abi_long ret = 0;
4261 abi_ulong val;
4262 int idx;
4263
4264 switch(code) {
4265 case TARGET_ARCH_SET_GS:
4266 case TARGET_ARCH_SET_FS:
4267 if (code == TARGET_ARCH_SET_GS)
4268 idx = R_GS;
4269 else
4270 idx = R_FS;
4271 cpu_x86_load_seg(env, idx, 0);
4272 env->segs[idx].base = addr;
4273 break;
4274 case TARGET_ARCH_GET_GS:
4275 case TARGET_ARCH_GET_FS:
4276 if (code == TARGET_ARCH_GET_GS)
4277 idx = R_GS;
4278 else
4279 idx = R_FS;
4280 val = env->segs[idx].base;
4281 if (put_user(val, addr, abi_ulong))
4282 ret = -TARGET_EFAULT;
4283 break;
4284 default:
4285 ret = -TARGET_EINVAL;
4286 break;
4287 }
4288 return ret;
4289 }
4290 #endif
4291
4292 #endif /* defined(TARGET_I386) */
4293
4294 #define NEW_STACK_SIZE 0x40000
4295
4296 #if defined(CONFIG_USE_NPTL)
4297
4298 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
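/* Per-clone bookkeeping handed to the new thread.  The mutex/condvar pair lets
   the parent block until the child has published its TID and is ready to run;
   sigmask restores the signal state that do_fork() blocked around thread
   creation. */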
4299 typedef struct {
4300 CPUArchState *env;
4301 pthread_mutex_t mutex;
4302 pthread_cond_t cond;
4303 pthread_t thread;
4304 uint32_t tid;
4305 abi_ulong child_tidptr;
4306 abi_ulong parent_tidptr;
4307 sigset_t sigmask;
4308 } new_thread_info;
4309
4310 static void *clone_func(void *arg)
4311 {
4312 new_thread_info *info = arg;
4313 CPUArchState *env;
4314 TaskState *ts;
4315
4316 env = info->env;
4317 thread_env = env;
4318 ts = (TaskState *)thread_env->opaque;
4319 info->tid = gettid();
4320 env->host_tid = info->tid;
4321 task_settid(ts);
4322 if (info->child_tidptr)
4323 put_user_u32(info->tid, info->child_tidptr);
4324 if (info->parent_tidptr)
4325 put_user_u32(info->tid, info->parent_tidptr);
4326 /* Enable signals. */
4327 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4328 /* Signal to the parent that we're ready. */
4329 pthread_mutex_lock(&info->mutex);
4330 pthread_cond_broadcast(&info->cond);
4331 pthread_mutex_unlock(&info->mutex);
4332 /* Wait until the parent has finished initializing the TLS state. */
4333 pthread_mutex_lock(&clone_lock);
4334 pthread_mutex_unlock(&clone_lock);
4335 cpu_loop(env);
4336 /* never exits */
4337 return NULL;
4338 }
4339 #else
4340
4341 static int clone_func(void *arg)
4342 {
4343 CPUArchState *env = arg;
4344 cpu_loop(env);
4345 /* never exits */
4346 return 0;
4347 }
4348 #endif
4349
4350 /* do_fork() must return host values and target errnos (unlike most
4351 do_*() functions). */
4352 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4353 abi_ulong parent_tidptr, target_ulong newtls,
4354 abi_ulong child_tidptr)
4355 {
4356 int ret;
4357 TaskState *ts;
4358 CPUArchState *new_env;
4359 #if defined(CONFIG_USE_NPTL)
4360 unsigned int nptl_flags;
4361 sigset_t sigmask;
4362 #else
4363 uint8_t *new_stack;
4364 #endif
4365
4366 /* Emulate vfork() with fork() */
4367 if (flags & CLONE_VFORK)
4368 flags &= ~(CLONE_VFORK | CLONE_VM);
4369
4370 if (flags & CLONE_VM) {
4371 TaskState *parent_ts = (TaskState *)env->opaque;
4372 #if defined(CONFIG_USE_NPTL)
4373 new_thread_info info;
4374 pthread_attr_t attr;
4375 #endif
4376 ts = g_malloc0(sizeof(TaskState));
4377 init_task_state(ts);
4378 /* we create a new CPU instance. */
4379 new_env = cpu_copy(env);
4380 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4381 cpu_reset(ENV_GET_CPU(new_env));
4382 #endif
4383 /* Init regs that differ from the parent. */
4384 cpu_clone_regs(new_env, newsp);
4385 new_env->opaque = ts;
4386 ts->bprm = parent_ts->bprm;
4387 ts->info = parent_ts->info;
4388 #if defined(CONFIG_USE_NPTL)
4389 nptl_flags = flags;
4390 flags &= ~CLONE_NPTL_FLAGS2;
4391
4392 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4393 ts->child_tidptr = child_tidptr;
4394 }
4395
4396 if (nptl_flags & CLONE_SETTLS)
4397 cpu_set_tls (new_env, newtls);
4398
4399 /* Grab a mutex so that thread setup appears atomic. */
4400 pthread_mutex_lock(&clone_lock);
4401
4402 memset(&info, 0, sizeof(info));
4403 pthread_mutex_init(&info.mutex, NULL);
4404 pthread_mutex_lock(&info.mutex);
4405 pthread_cond_init(&info.cond, NULL);
4406 info.env = new_env;
4407 if (nptl_flags & CLONE_CHILD_SETTID)
4408 info.child_tidptr = child_tidptr;
4409 if (nptl_flags & CLONE_PARENT_SETTID)
4410 info.parent_tidptr = parent_tidptr;
4411
4412 ret = pthread_attr_init(&attr);
4413 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4414 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4415 /* It is not safe to deliver signals until the child has finished
4416 initializing, so temporarily block all signals. */
4417 sigfillset(&sigmask);
4418 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4419
4420 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4421 /* TODO: Free new CPU state if thread creation failed. */
4422
4423 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4424 pthread_attr_destroy(&attr);
4425 if (ret == 0) {
4426 /* Wait for the child to initialize. */
4427 pthread_cond_wait(&info.cond, &info.mutex);
4428 ret = info.tid;
4429 if (flags & CLONE_PARENT_SETTID)
4430 put_user_u32(ret, parent_tidptr);
4431 } else {
4432 ret = -1;
4433 }
4434 pthread_mutex_unlock(&info.mutex);
4435 pthread_cond_destroy(&info.cond);
4436 pthread_mutex_destroy(&info.mutex);
4437 pthread_mutex_unlock(&clone_lock);
4438 #else
4439 if (flags & CLONE_NPTL_FLAGS2)
4440 return -EINVAL;
4441 /* This is probably going to die very quickly, but do it anyway. */
4442 new_stack = g_malloc0 (NEW_STACK_SIZE);
4443 #ifdef __ia64__
4444 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4445 #else
4446 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4447 #endif
4448 #endif
4449 } else {
4450 /* if there is no CLONE_VM, we consider it a fork */
4451 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4452 return -EINVAL;
4453 fork_start();
4454 ret = fork();
4455 if (ret == 0) {
4456 /* Child Process. */
4457 cpu_clone_regs(env, newsp);
4458 fork_end(1);
4459 #if defined(CONFIG_USE_NPTL)
4460 /* There is a race condition here. The parent process could
4461 theoretically read the TID in the child process before the child
4462 tid is set. This would require using either ptrace
4463 (not implemented) or having *_tidptr point at a shared memory
4464 mapping. We can't repeat the spinlock hack used above because
4465 the child process gets its own copy of the lock. */
4466 if (flags & CLONE_CHILD_SETTID)
4467 put_user_u32(gettid(), child_tidptr);
4468 if (flags & CLONE_PARENT_SETTID)
4469 put_user_u32(gettid(), parent_tidptr);
4470 ts = (TaskState *)env->opaque;
4471 if (flags & CLONE_SETTLS)
4472 cpu_set_tls (env, newtls);
4473 if (flags & CLONE_CHILD_CLEARTID)
4474 ts->child_tidptr = child_tidptr;
4475 #endif
4476 } else {
4477 fork_end(0);
4478 }
4479 }
4480 return ret;
4481 }
4482
4483 /* warning: doesn't handle Linux-specific flags... */
4484 static int target_to_host_fcntl_cmd(int cmd)
4485 {
4486 switch(cmd) {
4487 case TARGET_F_DUPFD:
4488 case TARGET_F_GETFD:
4489 case TARGET_F_SETFD:
4490 case TARGET_F_GETFL:
4491 case TARGET_F_SETFL:
4492 return cmd;
4493 case TARGET_F_GETLK:
4494 return F_GETLK;
4495 case TARGET_F_SETLK:
4496 return F_SETLK;
4497 case TARGET_F_SETLKW:
4498 return F_SETLKW;
4499 case TARGET_F_GETOWN:
4500 return F_GETOWN;
4501 case TARGET_F_SETOWN:
4502 return F_SETOWN;
4503 case TARGET_F_GETSIG:
4504 return F_GETSIG;
4505 case TARGET_F_SETSIG:
4506 return F_SETSIG;
4507 #if TARGET_ABI_BITS == 32
4508 case TARGET_F_GETLK64:
4509 return F_GETLK64;
4510 case TARGET_F_SETLK64:
4511 return F_SETLK64;
4512 case TARGET_F_SETLKW64:
4513 return F_SETLKW64;
4514 #endif
4515 case TARGET_F_SETLEASE:
4516 return F_SETLEASE;
4517 case TARGET_F_GETLEASE:
4518 return F_GETLEASE;
4519 #ifdef F_DUPFD_CLOEXEC
4520 case TARGET_F_DUPFD_CLOEXEC:
4521 return F_DUPFD_CLOEXEC;
4522 #endif
4523 case TARGET_F_NOTIFY:
4524 return F_NOTIFY;
4525 default:
4526 return -TARGET_EINVAL;
4527 }
4528 return -TARGET_EINVAL;
4529 }
4530
4531 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
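/* l_type holds an enumerated lock type rather than OR-able flag bits, so the
   -1 masks make the bitmask helpers compare the whole value against each
   constant and emit its counterpart. */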
4532 static const bitmask_transtbl flock_tbl[] = {
4533 TRANSTBL_CONVERT(F_RDLCK),
4534 TRANSTBL_CONVERT(F_WRLCK),
4535 TRANSTBL_CONVERT(F_UNLCK),
4536 TRANSTBL_CONVERT(F_EXLCK),
4537 TRANSTBL_CONVERT(F_SHLCK),
4538 { 0, 0, 0, 0 }
4539 };
4540
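/* fcntl(2): translate the command, convert struct flock/flock64 field by field
   (lock types via flock_tbl, offsets via tswapal/tswap64), and map the
   F_GETFL/F_SETFL status flags through fcntl_flags_tbl. */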
4541 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4542 {
4543 struct flock fl;
4544 struct target_flock *target_fl;
4545 struct flock64 fl64;
4546 struct target_flock64 *target_fl64;
4547 abi_long ret;
4548 int host_cmd = target_to_host_fcntl_cmd(cmd);
4549
4550 if (host_cmd == -TARGET_EINVAL)
4551 return host_cmd;
4552
4553 switch(cmd) {
4554 case TARGET_F_GETLK:
4555 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4556 return -TARGET_EFAULT;
4557 fl.l_type =
4558 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4559 fl.l_whence = tswap16(target_fl->l_whence);
4560 fl.l_start = tswapal(target_fl->l_start);
4561 fl.l_len = tswapal(target_fl->l_len);
4562 fl.l_pid = tswap32(target_fl->l_pid);
4563 unlock_user_struct(target_fl, arg, 0);
4564 ret = get_errno(fcntl(fd, host_cmd, &fl));
4565 if (ret == 0) {
4566 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4567 return -TARGET_EFAULT;
4568 target_fl->l_type =
4569 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4570 target_fl->l_whence = tswap16(fl.l_whence);
4571 target_fl->l_start = tswapal(fl.l_start);
4572 target_fl->l_len = tswapal(fl.l_len);
4573 target_fl->l_pid = tswap32(fl.l_pid);
4574 unlock_user_struct(target_fl, arg, 1);
4575 }
4576 break;
4577
4578 case TARGET_F_SETLK:
4579 case TARGET_F_SETLKW:
4580 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4581 return -TARGET_EFAULT;
4582 fl.l_type =
4583 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4584 fl.l_whence = tswap16(target_fl->l_whence);
4585 fl.l_start = tswapal(target_fl->l_start);
4586 fl.l_len = tswapal(target_fl->l_len);
4587 fl.l_pid = tswap32(target_fl->l_pid);
4588 unlock_user_struct(target_fl, arg, 0);
4589 ret = get_errno(fcntl(fd, host_cmd, &fl));
4590 break;
4591
4592 case TARGET_F_GETLK64:
4593 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4594 return -TARGET_EFAULT;
4595 fl64.l_type =
4596 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4597 fl64.l_whence = tswap16(target_fl64->l_whence);
4598 fl64.l_start = tswap64(target_fl64->l_start);
4599 fl64.l_len = tswap64(target_fl64->l_len);
4600 fl64.l_pid = tswap32(target_fl64->l_pid);
4601 unlock_user_struct(target_fl64, arg, 0);
4602 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4603 if (ret == 0) {
4604 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4605 return -TARGET_EFAULT;
4606 target_fl64->l_type =
4607 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4608 target_fl64->l_whence = tswap16(fl64.l_whence);
4609 target_fl64->l_start = tswap64(fl64.l_start);
4610 target_fl64->l_len = tswap64(fl64.l_len);
4611 target_fl64->l_pid = tswap32(fl64.l_pid);
4612 unlock_user_struct(target_fl64, arg, 1);
4613 }
4614 break;
4615 case TARGET_F_SETLK64:
4616 case TARGET_F_SETLKW64:
4617 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4618 return -TARGET_EFAULT;
4619 fl64.l_type =
4620 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4621 fl64.l_whence = tswap16(target_fl64->l_whence);
4622 fl64.l_start = tswap64(target_fl64->l_start);
4623 fl64.l_len = tswap64(target_fl64->l_len);
4624 fl64.l_pid = tswap32(target_fl64->l_pid);
4625 unlock_user_struct(target_fl64, arg, 0);
4626 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4627 break;
4628
4629 case TARGET_F_GETFL:
4630 ret = get_errno(fcntl(fd, host_cmd, arg));
4631 if (ret >= 0) {
4632 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4633 }
4634 break;
4635
4636 case TARGET_F_SETFL:
4637 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4638 break;
4639
4640 case TARGET_F_SETOWN:
4641 case TARGET_F_GETOWN:
4642 case TARGET_F_SETSIG:
4643 case TARGET_F_GETSIG:
4644 case TARGET_F_SETLEASE:
4645 case TARGET_F_GETLEASE:
4646 ret = get_errno(fcntl(fd, host_cmd, arg));
4647 break;
4648
4649 default:
4650 ret = get_errno(fcntl(fd, cmd, arg));
4651 break;
4652 }
4653 return ret;
4654 }
4655
4656 #ifdef USE_UID16
4657
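/* Legacy 16-bit UID/GID syscalls: IDs above 65535 are reported to the guest as
   65534, and the 16-bit -1 "no change" sentinel is widened back to -1 before
   being passed to the host. */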
4658 static inline int high2lowuid(int uid)
4659 {
4660 if (uid > 65535)
4661 return 65534;
4662 else
4663 return uid;
4664 }
4665
4666 static inline int high2lowgid(int gid)
4667 {
4668 if (gid > 65535)
4669 return 65534;
4670 else
4671 return gid;
4672 }
4673
4674 static inline int low2highuid(int uid)
4675 {
4676 if ((int16_t)uid == -1)
4677 return -1;
4678 else
4679 return uid;
4680 }
4681
4682 static inline int low2highgid(int gid)
4683 {
4684 if ((int16_t)gid == -1)
4685 return -1;
4686 else
4687 return gid;
4688 }
4689 static inline int tswapid(int id)
4690 {
4691 return tswap16(id);
4692 }
4693 #else /* !USE_UID16 */
4694 static inline int high2lowuid(int uid)
4695 {
4696 return uid;
4697 }
4698 static inline int high2lowgid(int gid)
4699 {
4700 return gid;
4701 }
4702 static inline int low2highuid(int uid)
4703 {
4704 return uid;
4705 }
4706 static inline int low2highgid(int gid)
4707 {
4708 return gid;
4709 }
4710 static inline int tswapid(int id)
4711 {
4712 return tswap32(id);
4713 }
4714 #endif /* USE_UID16 */
4715
4716 void syscall_init(void)
4717 {
4718 IOCTLEntry *ie;
4719 const argtype *arg_type;
4720 int size;
4721 int i;
4722
4723 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4724 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4725 #include "syscall_types.h"
4726 #undef STRUCT
4727 #undef STRUCT_SPECIAL
4728
4729 /* Build target_to_host_errno_table[] from
4730 * host_to_target_errno_table[]. */
4731 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4732 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4733 }
4734
4735 /* We patch the ioctl size if necessary. We rely on the fact that
4736 no ioctl has all bits set in its size field */
4737 ie = ioctl_entries;
4738 while (ie->target_cmd != 0) {
4739 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4740 TARGET_IOC_SIZEMASK) {
4741 arg_type = ie->arg_type;
4742 if (arg_type[0] != TYPE_PTR) {
4743 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4744 ie->target_cmd);
4745 exit(1);
4746 }
4747 arg_type++;
4748 size = thunk_type_size(arg_type, 0);
4749 ie->target_cmd = (ie->target_cmd &
4750 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4751 (size << TARGET_IOC_SIZESHIFT);
4752 }
4753
4754 /* automatic consistency check if same arch */
4755 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4756 (defined(__x86_64__) && defined(TARGET_X86_64))
4757 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4758 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4759 ie->name, ie->target_cmd, ie->host_cmd);
4760 }
4761 #endif
4762 ie++;
4763 }
4764 }
4765
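/* On 32-bit ABIs a 64-bit file offset arrives as two 32-bit syscall arguments;
   reassemble it in the right order for the target endianness.  Callers such as
   target_truncate64() first use regpairs_aligned() to skip the padding register
   on ABIs that pass 64-bit values in aligned register pairs. */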
4766 #if TARGET_ABI_BITS == 32
4767 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4768 {
4769 #ifdef TARGET_WORDS_BIGENDIAN
4770 return ((uint64_t)word0 << 32) | word1;
4771 #else
4772 return ((uint64_t)word1 << 32) | word0;
4773 #endif
4774 }
4775 #else /* TARGET_ABI_BITS == 32 */
4776 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4777 {
4778 return word0;
4779 }
4780 #endif /* TARGET_ABI_BITS != 32 */
4781
4782 #ifdef TARGET_NR_truncate64
4783 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4784 abi_long arg2,
4785 abi_long arg3,
4786 abi_long arg4)
4787 {
4788 if (regpairs_aligned(cpu_env)) {
4789 arg2 = arg3;
4790 arg3 = arg4;
4791 }
4792 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4793 }
4794 #endif
4795
4796 #ifdef TARGET_NR_ftruncate64
4797 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4798 abi_long arg2,
4799 abi_long arg3,
4800 abi_long arg4)
4801 {
4802 if (regpairs_aligned(cpu_env)) {
4803 arg2 = arg3;
4804 arg3 = arg4;
4805 }
4806 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4807 }
4808 #endif
4809
4810 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4811 abi_ulong target_addr)
4812 {
4813 struct target_timespec *target_ts;
4814
4815 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4816 return -TARGET_EFAULT;
4817 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4818 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4819 unlock_user_struct(target_ts, target_addr, 0);
4820 return 0;
4821 }
4822
4823 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4824 struct timespec *host_ts)
4825 {
4826 struct target_timespec *target_ts;
4827
4828 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4829 return -TARGET_EFAULT;
4830 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4831 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4832 unlock_user_struct(target_ts, target_addr, 1);
4833 return 0;
4834 }
4835
4836 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4837 static inline abi_long host_to_target_stat64(void *cpu_env,
4838 abi_ulong target_addr,
4839 struct stat *host_st)
4840 {
4841 #ifdef TARGET_ARM
4842 if (((CPUARMState *)cpu_env)->eabi) {
4843 struct target_eabi_stat64 *target_st;
4844
4845 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4846 return -TARGET_EFAULT;
4847 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4848 __put_user(host_st->st_dev, &target_st->st_dev);
4849 __put_user(host_st->st_ino, &target_st->st_ino);
4850 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4851 __put_user(host_st->st_ino, &target_st->__st_ino);
4852 #endif
4853 __put_user(host_st->st_mode, &target_st->st_mode);
4854 __put_user(host_st->st_nlink, &target_st->st_nlink);
4855 __put_user(host_st->st_uid, &target_st->st_uid);
4856 __put_user(host_st->st_gid, &target_st->st_gid);
4857 __put_user(host_st->st_rdev, &target_st->st_rdev);
4858 __put_user(host_st->st_size, &target_st->st_size);
4859 __put_user(host_st->st_blksize, &target_st->st_blksize);
4860 __put_user(host_st->st_blocks, &target_st->st_blocks);
4861 __put_user(host_st->st_atime, &target_st->target_st_atime);
4862 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4863 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4864 unlock_user_struct(target_st, target_addr, 1);
4865 } else
4866 #endif
4867 {
4868 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4869 struct target_stat *target_st;
4870 #else
4871 struct target_stat64 *target_st;
4872 #endif
4873
4874 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4875 return -TARGET_EFAULT;
4876 memset(target_st, 0, sizeof(*target_st));
4877 __put_user(host_st->st_dev, &target_st->st_dev);
4878 __put_user(host_st->st_ino, &target_st->st_ino);
4879 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4880 __put_user(host_st->st_ino, &target_st->__st_ino);
4881 #endif
4882 __put_user(host_st->st_mode, &target_st->st_mode);
4883 __put_user(host_st->st_nlink, &target_st->st_nlink);
4884 __put_user(host_st->st_uid, &target_st->st_uid);
4885 __put_user(host_st->st_gid, &target_st->st_gid);
4886 __put_user(host_st->st_rdev, &target_st->st_rdev);
4887 /* XXX: better use of kernel struct */
4888 __put_user(host_st->st_size, &target_st->st_size);
4889 __put_user(host_st->st_blksize, &target_st->st_blksize);
4890 __put_user(host_st->st_blocks, &target_st->st_blocks);
4891 __put_user(host_st->st_atime, &target_st->target_st_atime);
4892 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4893 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4894 unlock_user_struct(target_st, target_addr, 1);
4895 }
4896
4897 return 0;
4898 }
4899 #endif
4900
4901 #if defined(CONFIG_USE_NPTL)
4902 /* ??? Using host futex calls even when target atomic operations
4903 are not really atomic probably breaks things. However, implementing
4904 futexes locally would make futexes shared between multiple processes
4905 tricky, and they would probably be useless anyway because guest atomic
4906 operations would not work either. */
4907 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4908 target_ulong uaddr2, int val3)
4909 {
4910 struct timespec ts, *pts;
4911 int base_op;
4912
4913 /* ??? We assume FUTEX_* constants are the same on both host
4914 and target. */
4915 #ifdef FUTEX_CMD_MASK
4916 base_op = op & FUTEX_CMD_MASK;
4917 #else
4918 base_op = op;
4919 #endif
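/* For FUTEX_WAIT, the word at uaddr is in guest byte order, so val is
   byte-swapped to match it rather than swapping the memory word; an optional
   timeout is converted to a host struct timespec. */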
4920 switch (base_op) {
4921 case FUTEX_WAIT:
4922 if (timeout) {
4923 pts = &ts;
4924 target_to_host_timespec(pts, timeout);
4925 } else {
4926 pts = NULL;
4927 }
4928 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4929 pts, NULL, 0));
4930 case FUTEX_WAKE:
4931 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4932 case FUTEX_FD:
4933 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4934 case FUTEX_REQUEUE:
4935 case FUTEX_CMP_REQUEUE:
4936 case FUTEX_WAKE_OP:
4937 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4938 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4939 But the prototype takes a `struct timespec *'; insert casts
4940 to satisfy the compiler. We do not need to tswap TIMEOUT
4941 since it's not compared to guest memory. */
4942 pts = (struct timespec *)(uintptr_t) timeout;
4943 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4944 g2h(uaddr2),
4945 (base_op == FUTEX_CMP_REQUEUE
4946 ? tswap32(val3)
4947 : val3)));
4948 default:
4949 return -TARGET_ENOSYS;
4950 }
4951 }
4952 #endif
4953
4954 /* Map host to target signal numbers for the wait family of syscalls.
4955 Assume all other status bits are the same. */
4956 int host_to_target_waitstatus(int status)
4957 {
4958 if (WIFSIGNALED(status)) {
4959 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4960 }
4961 if (WIFSTOPPED(status)) {
4962 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4963 | (status & 0xff);
4964 }
4965 return status;
4966 }
4967
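/* Return the (possibly overridden) kernel release packed one byte per
   component, matching KERNEL_VERSION(): e.g. a "3.2.0-4" release string
   yields 0x030200. */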
4968 int get_osversion(void)
4969 {
4970 static int osversion;
4971 struct new_utsname buf;
4972 const char *s;
4973 int i, n, tmp;
4974 if (osversion)
4975 return osversion;
4976 if (qemu_uname_release && *qemu_uname_release) {
4977 s = qemu_uname_release;
4978 } else {
4979 if (sys_uname(&buf))
4980 return 0;
4981 s = buf.release;
4982 }
4983 tmp = 0;
4984 for (i = 0; i < 3; i++) {
4985 n = 0;
4986 while (*s >= '0' && *s <= '9') {
4987 n *= 10;
4988 n += *s - '0';
4989 s++;
4990 }
4991 tmp = (tmp << 8) + n;
4992 if (*s == '.')
4993 s++;
4994 }
4995 osversion = tmp;
4996 return osversion;
4997 }
4998
4999
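/* Fake /proc/self/maps: re-emit the host's maps with addresses translated into
   the guest view via h2g(), dropping entries outside the guest address space as
   well as the host [stack] line, and appending a synthetic [stack] entry for
   targets that expect one. */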
5000 static int open_self_maps(void *cpu_env, int fd)
5001 {
5002 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5003 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5004 #endif
5005 FILE *fp;
5006 char *line = NULL;
5007 size_t len = 0;
5008 ssize_t read;
5009
5010 fp = fopen("/proc/self/maps", "r");
5011 if (fp == NULL) {
5012 return -EACCES;
5013 }
5014
5015 while ((read = getline(&line, &len, fp)) != -1) {
5016 int fields, dev_maj, dev_min, inode;
5017 uint64_t min, max, offset;
5018 char flag_r, flag_w, flag_x, flag_p;
5019 char path[512] = "";
5020 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5021 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5022 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5023
5024 if ((fields < 10) || (fields > 11)) {
5025 continue;
5026 }
5027 if (!strncmp(path, "[stack]", 7)) {
5028 continue;
5029 }
5030 if (h2g_valid(min) && h2g_valid(max)) {
5031 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5032 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
5033 h2g(min), h2g(max), flag_r, flag_w,
5034 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5035 path[0] ? " " : "", path);
5036 }
5037 }
5038
5039 free(line);
5040 fclose(fp);
5041
5042 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5043 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5044 (unsigned long long)ts->info->stack_limit,
5045 (unsigned long long)(ts->info->start_stack +
5046 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
5047 (unsigned long long)0);
5048 #endif
5049
5050 return 0;
5051 }
5052
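/* Fake /proc/self/stat: only the pid, the executable name and the stack start
   (field 28) are filled in from the task state; every other field is written
   as 0. */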
5053 static int open_self_stat(void *cpu_env, int fd)
5054 {
5055 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5056 abi_ulong start_stack = ts->info->start_stack;
5057 int i;
5058
5059 for (i = 0; i < 44; i++) {
5060 char buf[128];
5061 int len;
5062 uint64_t val = 0;
5063
5064 if (i == 0) {
5065 /* pid */
5066 val = getpid();
5067 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5068 } else if (i == 1) {
5069 /* app name */
5070 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5071 } else if (i == 27) {
5072 /* stack bottom */
5073 val = start_stack;
5074 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5075 } else {
5076 /* all remaining fields are reported as 0 */
5077 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5078 }
5079
5080 len = strlen(buf);
5081 if (write(fd, buf, len) != len) {
5082 return -1;
5083 }
5084 }
5085
5086 return 0;
5087 }
5088
5089 static int open_self_auxv(void *cpu_env, int fd)
5090 {
5091 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5092 abi_ulong auxv = ts->info->saved_auxv;
5093 abi_ulong len = ts->info->auxv_len;
5094 char *ptr;
5095
5096 /*
5097 * The auxiliary vector is stored on the target process stack.
5098 * Read the whole auxv vector and copy it to the file.
5099 */
5100 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5101 if (ptr != NULL) {
5102 while (len > 0) {
5103 ssize_t r;
5104 r = write(fd, ptr, len);
5105 if (r <= 0) {
5106 break;
5107 }
5108 len -= r;
5109 ptr += r;
5110 }
5111 lseek(fd, 0, SEEK_SET);
5112 unlock_user(ptr, auxv, len);
5113 }
5114
5115 return 0;
5116 }
5117
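/* open(2) wrapper: requests for the /proc/self files listed below are
   redirected to an unlinked temporary file filled by the matching fill()
   callback, so the guest sees contents describing the emulated process rather
   than the host QEMU process. */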
5118 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5119 {
5120 struct fake_open {
5121 const char *filename;
5122 int (*fill)(void *cpu_env, int fd);
5123 };
5124 const struct fake_open *fake_open;
5125 static const struct fake_open fakes[] = {
5126 { "/proc/self/maps", open_self_maps },
5127 { "/proc/self/stat", open_self_stat },
5128 { "/proc/self/auxv", open_self_auxv },
5129 { NULL, NULL }
5130 };
5131
5132 for (fake_open = fakes; fake_open->filename; fake_open++) {
5133 if (!strncmp(pathname, fake_open->filename,
5134 strlen(fake_open->filename))) {
5135 break;
5136 }
5137 }
5138
5139 if (fake_open->filename) {
5140 const char *tmpdir;
5141 char filename[PATH_MAX];
5142 int fd, r;
5143
5144 /* create a temporary file to hold the synthesized /proc contents */
5145 tmpdir = getenv("TMPDIR");
5146 if (!tmpdir)
5147 tmpdir = "/tmp";
5148 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5149 fd = mkstemp(filename);
5150 if (fd < 0) {
5151 return fd;
5152 }
5153 unlink(filename);
5154
5155 if ((r = fake_open->fill(cpu_env, fd))) {
5156 close(fd);
5157 return r;
5158 }
5159 lseek(fd, 0, SEEK_SET);
5160
5161 return fd;
5162 }
5163
5164 return get_errno(open(path(pathname), flags, mode));
5165 }
5166
5167 /* do_syscall() should always have a single exit point at the end so
5168 that actions, such as logging of syscall results, can be performed.
5169 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5170 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5171 abi_long arg2, abi_long arg3, abi_long arg4,
5172 abi_long arg5, abi_long arg6, abi_long arg7,
5173 abi_long arg8)
5174 {
5175 abi_long ret;
5176 struct stat st;
5177 struct statfs stfs;
5178 void *p;
5179
5180 #ifdef DEBUG
5181 gemu_log("syscall %d", num);
5182 #endif
5183 if(do_strace)
5184 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5185
5186 switch(num) {
5187 case TARGET_NR_exit:
5188 #ifdef CONFIG_USE_NPTL
5189 /* In old applications this may be used to implement _exit(2).
5190 However, in threaded applications it is used for thread termination,
5191 and _exit_group is used for application termination.
5192 Do thread termination if we have more than one thread. */
5193 /* FIXME: This probably breaks if a signal arrives. We should probably
5194 be disabling signals. */
5195 if (first_cpu->next_cpu) {
5196 TaskState *ts;
5197 CPUArchState **lastp;
5198 CPUArchState *p;
5199
5200 cpu_list_lock();
5201 lastp = &first_cpu;
5202 p = first_cpu;
5203 while (p && p != (CPUArchState *)cpu_env) {
5204 lastp = &p->next_cpu;
5205 p = p->next_cpu;
5206 }
5207 /* If we didn't find the CPU for this thread then something is
5208 horribly wrong. */
5209 if (!p)
5210 abort();
5211 /* Remove the CPU from the list. */
5212 *lastp = p->next_cpu;
5213 cpu_list_unlock();
5214 ts = ((CPUArchState *)cpu_env)->opaque;
5215 if (ts->child_tidptr) {
5216 put_user_u32(0, ts->child_tidptr);
5217 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5218 NULL, NULL, 0);
5219 }
5220 thread_env = NULL;
5221 object_delete(OBJECT(ENV_GET_CPU(cpu_env)));
5222 g_free(ts);
5223 pthread_exit(NULL);
5224 }
5225 #endif
5226 #ifdef TARGET_GPROF
5227 _mcleanup();
5228 #endif
5229 gdb_exit(cpu_env, arg1);
5230 _exit(arg1);
5231 ret = 0; /* avoid warning */
5232 break;
5233 case TARGET_NR_read:
5234 if (arg3 == 0)
5235 ret = 0;
5236 else {
5237 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5238 goto efault;
5239 ret = get_errno(read(arg1, p, arg3));
5240 unlock_user(p, arg2, ret);
5241 }
5242 break;
5243 case TARGET_NR_write:
5244 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5245 goto efault;
5246 ret = get_errno(write(arg1, p, arg3));
5247 unlock_user(p, arg2, 0);
5248 break;
5249 case TARGET_NR_open:
5250 if (!(p = lock_user_string(arg1)))
5251 goto efault;
5252 ret = get_errno(do_open(cpu_env, p,
5253 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5254 arg3));
5255 unlock_user(p, arg1, 0);
5256 break;
5257 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5258 case TARGET_NR_openat:
5259 if (!(p = lock_user_string(arg2)))
5260 goto efault;
5261 ret = get_errno(sys_openat(arg1,
5262 path(p),
5263 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5264 arg4));
5265 unlock_user(p, arg2, 0);
5266 break;
5267 #endif
5268 case TARGET_NR_close:
5269 ret = get_errno(close(arg1));
5270 break;
5271 case TARGET_NR_brk:
5272 ret = do_brk(arg1);
5273 break;
5274 case TARGET_NR_fork:
5275 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5276 break;
5277 #ifdef TARGET_NR_waitpid
5278 case TARGET_NR_waitpid:
5279 {
5280 int status;
5281 ret = get_errno(waitpid(arg1, &status, arg3));
5282 if (!is_error(ret) && arg2 && ret
5283 && put_user_s32(host_to_target_waitstatus(status), arg2))
5284 goto efault;
5285 }
5286 break;
5287 #endif
5288 #ifdef TARGET_NR_waitid
5289 case TARGET_NR_waitid:
5290 {
5291 siginfo_t info;
5292 info.si_pid = 0;
5293 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5294 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5295 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5296 goto efault;
5297 host_to_target_siginfo(p, &info);
5298 unlock_user(p, arg3, sizeof(target_siginfo_t));
5299 }
5300 }
5301 break;
5302 #endif
5303 #ifdef TARGET_NR_creat /* not on alpha */
5304 case TARGET_NR_creat:
5305 if (!(p = lock_user_string(arg1)))
5306 goto efault;
5307 ret = get_errno(creat(p, arg2));
5308 unlock_user(p, arg1, 0);
5309 break;
5310 #endif
5311 case TARGET_NR_link:
5312 {
5313 void * p2;
5314 p = lock_user_string(arg1);
5315 p2 = lock_user_string(arg2);
5316 if (!p || !p2)
5317 ret = -TARGET_EFAULT;
5318 else
5319 ret = get_errno(link(p, p2));
5320 unlock_user(p2, arg2, 0);
5321 unlock_user(p, arg1, 0);
5322 }
5323 break;
5324 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5325 case TARGET_NR_linkat:
5326 {
5327 void * p2 = NULL;
5328 if (!arg2 || !arg4)
5329 goto efault;
5330 p = lock_user_string(arg2);
5331 p2 = lock_user_string(arg4);
5332 if (!p || !p2)
5333 ret = -TARGET_EFAULT;
5334 else
5335 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
5336 unlock_user(p, arg2, 0);
5337 unlock_user(p2, arg4, 0);
5338 }
5339 break;
5340 #endif
5341 case TARGET_NR_unlink:
5342 if (!(p = lock_user_string(arg1)))
5343 goto efault;
5344 ret = get_errno(unlink(p));
5345 unlock_user(p, arg1, 0);
5346 break;
5347 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5348 case TARGET_NR_unlinkat:
5349 if (!(p = lock_user_string(arg2)))
5350 goto efault;
5351 ret = get_errno(sys_unlinkat(arg1, p, arg3));
5352 unlock_user(p, arg2, 0);
5353 break;
5354 #endif
5355 case TARGET_NR_execve:
5356 {
5357 char **argp, **envp;
5358 int argc, envc;
5359 abi_ulong gp;
5360 abi_ulong guest_argp;
5361 abi_ulong guest_envp;
5362 abi_ulong addr;
5363 char **q;
5364 int total_size = 0;
5365
5366 argc = 0;
5367 guest_argp = arg2;
5368 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5369 if (get_user_ual(addr, gp))
5370 goto efault;
5371 if (!addr)
5372 break;
5373 argc++;
5374 }
5375 envc = 0;
5376 guest_envp = arg3;
5377 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5378 if (get_user_ual(addr, gp))
5379 goto efault;
5380 if (!addr)
5381 break;
5382 envc++;
5383 }
5384
5385 argp = alloca((argc + 1) * sizeof(void *));
5386 envp = alloca((envc + 1) * sizeof(void *));
5387
5388 for (gp = guest_argp, q = argp; gp;
5389 gp += sizeof(abi_ulong), q++) {
5390 if (get_user_ual(addr, gp))
5391 goto execve_efault;
5392 if (!addr)
5393 break;
5394 if (!(*q = lock_user_string(addr)))
5395 goto execve_efault;
5396 total_size += strlen(*q) + 1;
5397 }
5398 *q = NULL;
5399
5400 for (gp = guest_envp, q = envp; gp;
5401 gp += sizeof(abi_ulong), q++) {
5402 if (get_user_ual(addr, gp))
5403 goto execve_efault;
5404 if (!addr)
5405 break;
5406 if (!(*q = lock_user_string(addr)))
5407 goto execve_efault;
5408 total_size += strlen(*q) + 1;
5409 }
5410 *q = NULL;
5411
5412 /* This case will not be caught by the host's execve() if its
5413 page size is bigger than the target's. */
5414 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5415 ret = -TARGET_E2BIG;
5416 goto execve_end;
5417 }
5418 if (!(p = lock_user_string(arg1)))
5419 goto execve_efault;
5420 ret = get_errno(execve(p, argp, envp));
5421 unlock_user(p, arg1, 0);
5422
5423 goto execve_end;
5424
5425 execve_efault:
5426 ret = -TARGET_EFAULT;
5427
5428 execve_end:
5429 for (gp = guest_argp, q = argp; *q;
5430 gp += sizeof(abi_ulong), q++) {
5431 if (get_user_ual(addr, gp)
5432 || !addr)
5433 break;
5434 unlock_user(*q, addr, 0);
5435 }
5436 for (gp = guest_envp, q = envp; *q;
5437 gp += sizeof(abi_ulong), q++) {
5438 if (get_user_ual(addr, gp)
5439 || !addr)
5440 break;
5441 unlock_user(*q, addr, 0);
5442 }
5443 }
5444 break;
5445 case TARGET_NR_chdir:
5446 if (!(p = lock_user_string(arg1)))
5447 goto efault;
5448 ret = get_errno(chdir(p));
5449 unlock_user(p, arg1, 0);
5450 break;
5451 #ifdef TARGET_NR_time
5452 case TARGET_NR_time:
5453 {
5454 time_t host_time;
5455 ret = get_errno(time(&host_time));
5456 if (!is_error(ret)
5457 && arg1
5458 && put_user_sal(host_time, arg1))
5459 goto efault;
5460 }
5461 break;
5462 #endif
5463 case TARGET_NR_mknod:
5464 if (!(p = lock_user_string(arg1)))
5465 goto efault;
5466 ret = get_errno(mknod(p, arg2, arg3));
5467 unlock_user(p, arg1, 0);
5468 break;
5469 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5470 case TARGET_NR_mknodat:
5471 if (!(p = lock_user_string(arg2)))
5472 goto efault;
5473 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5474 unlock_user(p, arg2, 0);
5475 break;
5476 #endif
5477 case TARGET_NR_chmod:
5478 if (!(p = lock_user_string(arg1)))
5479 goto efault;
5480 ret = get_errno(chmod(p, arg2));
5481 unlock_user(p, arg1, 0);
5482 break;
5483 #ifdef TARGET_NR_break
5484 case TARGET_NR_break:
5485 goto unimplemented;
5486 #endif
5487 #ifdef TARGET_NR_oldstat
5488 case TARGET_NR_oldstat:
5489 goto unimplemented;
5490 #endif
5491 case TARGET_NR_lseek:
5492 ret = get_errno(lseek(arg1, arg2, arg3));
5493 break;
5494 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5495 /* Alpha specific */
5496 case TARGET_NR_getxpid:
5497 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5498 ret = get_errno(getpid());
5499 break;
5500 #endif
5501 #ifdef TARGET_NR_getpid
5502 case TARGET_NR_getpid:
5503 ret = get_errno(getpid());
5504 break;
5505 #endif
5506 case TARGET_NR_mount:
5507 {
5508 /* need to look at the data field */
5509 void *p2, *p3;
5510 p = lock_user_string(arg1);
5511 p2 = lock_user_string(arg2);
5512 p3 = lock_user_string(arg3);
5513 if (!p || !p2 || !p3)
5514 ret = -TARGET_EFAULT;
5515 else {
5516 /* FIXME - arg5 should be locked, but it isn't clear how to
5517 * do that since it's not guaranteed to be a NULL-terminated
5518 * string.
5519 */
5520 if ( ! arg5 )
5521 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5522 else
5523 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5524 }
5525 unlock_user(p, arg1, 0);
5526 unlock_user(p2, arg2, 0);
5527 unlock_user(p3, arg3, 0);
5528 break;
5529 }
5530 #ifdef TARGET_NR_umount
5531 case TARGET_NR_umount:
5532 if (!(p = lock_user_string(arg1)))
5533 goto efault;
5534 ret = get_errno(umount(p));
5535 unlock_user(p, arg1, 0);
5536 break;
5537 #endif
5538 #ifdef TARGET_NR_stime /* not on alpha */
5539 case TARGET_NR_stime:
5540 {
5541 time_t host_time;
5542 if (get_user_sal(host_time, arg1))
5543 goto efault;
5544 ret = get_errno(stime(&host_time));
5545 }
5546 break;
5547 #endif
5548 case TARGET_NR_ptrace:
5549 goto unimplemented;
5550 #ifdef TARGET_NR_alarm /* not on alpha */
5551 case TARGET_NR_alarm:
5552 ret = alarm(arg1);
5553 break;
5554 #endif
5555 #ifdef TARGET_NR_oldfstat
5556 case TARGET_NR_oldfstat:
5557 goto unimplemented;
5558 #endif
5559 #ifdef TARGET_NR_pause /* not on alpha */
5560 case TARGET_NR_pause:
5561 ret = get_errno(pause());
5562 break;
5563 #endif
5564 #ifdef TARGET_NR_utime
5565 case TARGET_NR_utime:
5566 {
5567 struct utimbuf tbuf, *host_tbuf;
5568 struct target_utimbuf *target_tbuf;
5569 if (arg2) {
5570 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5571 goto efault;
5572 tbuf.actime = tswapal(target_tbuf->actime);
5573 tbuf.modtime = tswapal(target_tbuf->modtime);
5574 unlock_user_struct(target_tbuf, arg2, 0);
5575 host_tbuf = &tbuf;
5576 } else {
5577 host_tbuf = NULL;
5578 }
5579 if (!(p = lock_user_string(arg1)))
5580 goto efault;
5581 ret = get_errno(utime(p, host_tbuf));
5582 unlock_user(p, arg1, 0);
5583 }
5584 break;
5585 #endif
5586 case TARGET_NR_utimes:
5587 {
5588 struct timeval *tvp, tv[2];
5589 if (arg2) {
5590 if (copy_from_user_timeval(&tv[0], arg2)
5591 || copy_from_user_timeval(&tv[1],
5592 arg2 + sizeof(struct target_timeval)))
5593 goto efault;
5594 tvp = tv;
5595 } else {
5596 tvp = NULL;
5597 }
5598 if (!(p = lock_user_string(arg1)))
5599 goto efault;
5600 ret = get_errno(utimes(p, tvp));
5601 unlock_user(p, arg1, 0);
5602 }
5603 break;
5604 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5605 case TARGET_NR_futimesat:
5606 {
5607 struct timeval *tvp, tv[2];
5608 if (arg3) {
5609 if (copy_from_user_timeval(&tv[0], arg3)
5610 || copy_from_user_timeval(&tv[1],
5611 arg3 + sizeof(struct target_timeval)))
5612 goto efault;
5613 tvp = tv;
5614 } else {
5615 tvp = NULL;
5616 }
5617 if (!(p = lock_user_string(arg2)))
5618 goto efault;
5619 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5620 unlock_user(p, arg2, 0);
5621 }
5622 break;
5623 #endif
5624 #ifdef TARGET_NR_stty
5625 case TARGET_NR_stty:
5626 goto unimplemented;
5627 #endif
5628 #ifdef TARGET_NR_gtty
5629 case TARGET_NR_gtty:
5630 goto unimplemented;
5631 #endif
5632 case TARGET_NR_access:
5633 if (!(p = lock_user_string(arg1)))
5634 goto efault;
5635 ret = get_errno(access(path(p), arg2));
5636 unlock_user(p, arg1, 0);
5637 break;
5638 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5639 case TARGET_NR_faccessat:
5640 if (!(p = lock_user_string(arg2)))
5641 goto efault;
5642 ret = get_errno(sys_faccessat(arg1, p, arg3));
5643 unlock_user(p, arg2, 0);
5644 break;
5645 #endif
5646 #ifdef TARGET_NR_nice /* not on alpha */
5647 case TARGET_NR_nice:
5648 ret = get_errno(nice(arg1));
5649 break;
5650 #endif
5651 #ifdef TARGET_NR_ftime
5652 case TARGET_NR_ftime:
5653 goto unimplemented;
5654 #endif
5655 case TARGET_NR_sync:
5656 sync();
5657 ret = 0;
5658 break;
5659 case TARGET_NR_kill:
5660 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5661 break;
5662 case TARGET_NR_rename:
5663 {
5664 void *p2;
5665 p = lock_user_string(arg1);
5666 p2 = lock_user_string(arg2);
5667 if (!p || !p2)
5668 ret = -TARGET_EFAULT;
5669 else
5670 ret = get_errno(rename(p, p2));
5671 unlock_user(p2, arg2, 0);
5672 unlock_user(p, arg1, 0);
5673 }
5674 break;
5675 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5676 case TARGET_NR_renameat:
5677 {
5678 void *p2;
5679 p = lock_user_string(arg2);
5680 p2 = lock_user_string(arg4);
5681 if (!p || !p2)
5682 ret = -TARGET_EFAULT;
5683 else
5684 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5685 unlock_user(p2, arg4, 0);
5686 unlock_user(p, arg2, 0);
5687 }
5688 break;
5689 #endif
5690 case TARGET_NR_mkdir:
5691 if (!(p = lock_user_string(arg1)))
5692 goto efault;
5693 ret = get_errno(mkdir(p, arg2));
5694 unlock_user(p, arg1, 0);
5695 break;
5696 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5697 case TARGET_NR_mkdirat:
5698 if (!(p = lock_user_string(arg2)))
5699 goto efault;
5700 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5701 unlock_user(p, arg2, 0);
5702 break;
5703 #endif
5704 case TARGET_NR_rmdir:
5705 if (!(p = lock_user_string(arg1)))
5706 goto efault;
5707 ret = get_errno(rmdir(p));
5708 unlock_user(p, arg1, 0);
5709 break;
5710 case TARGET_NR_dup:
5711 ret = get_errno(dup(arg1));
5712 break;
5713 case TARGET_NR_pipe:
5714 ret = do_pipe(cpu_env, arg1, 0, 0);
5715 break;
5716 #ifdef TARGET_NR_pipe2
5717 case TARGET_NR_pipe2:
5718 ret = do_pipe(cpu_env, arg1,
5719 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5720 break;
5721 #endif
5722 case TARGET_NR_times:
5723 {
5724 struct target_tms *tmsp;
5725 struct tms tms;
5726 ret = get_errno(times(&tms));
5727 if (arg1) {
5728 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5729 if (!tmsp)
5730 goto efault;
5731 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5732 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5733 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5734 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5735 }
5736 if (!is_error(ret))
5737 ret = host_to_target_clock_t(ret);
5738 }
5739 break;
5740 #ifdef TARGET_NR_prof
5741 case TARGET_NR_prof:
5742 goto unimplemented;
5743 #endif
5744 #ifdef TARGET_NR_signal
5745 case TARGET_NR_signal:
5746 goto unimplemented;
5747 #endif
5748 case TARGET_NR_acct:
5749 if (arg1 == 0) {
5750 ret = get_errno(acct(NULL));
5751 } else {
5752 if (!(p = lock_user_string(arg1)))
5753 goto efault;
5754 ret = get_errno(acct(path(p)));
5755 unlock_user(p, arg1, 0);
5756 }
5757 break;
5758 #ifdef TARGET_NR_umount2 /* not on alpha */
5759 case TARGET_NR_umount2:
5760 if (!(p = lock_user_string(arg1)))
5761 goto efault;
5762 ret = get_errno(umount2(p, arg2));
5763 unlock_user(p, arg1, 0);
5764 break;
5765 #endif
5766 #ifdef TARGET_NR_lock
5767 case TARGET_NR_lock:
5768 goto unimplemented;
5769 #endif
5770 case TARGET_NR_ioctl:
5771 ret = do_ioctl(arg1, arg2, arg3);
5772 break;
5773 case TARGET_NR_fcntl:
5774 ret = do_fcntl(arg1, arg2, arg3);
5775 break;
5776 #ifdef TARGET_NR_mpx
5777 case TARGET_NR_mpx:
5778 goto unimplemented;
5779 #endif
5780 case TARGET_NR_setpgid:
5781 ret = get_errno(setpgid(arg1, arg2));
5782 break;
5783 #ifdef TARGET_NR_ulimit
5784 case TARGET_NR_ulimit:
5785 goto unimplemented;
5786 #endif
5787 #ifdef TARGET_NR_oldolduname
5788 case TARGET_NR_oldolduname:
5789 goto unimplemented;
5790 #endif
5791 case TARGET_NR_umask:
5792 ret = get_errno(umask(arg1));
5793 break;
5794 case TARGET_NR_chroot:
5795 if (!(p = lock_user_string(arg1)))
5796 goto efault;
5797 ret = get_errno(chroot(p));
5798 unlock_user(p, arg1, 0);
5799 break;
5800 case TARGET_NR_ustat:
5801 goto unimplemented;
5802 case TARGET_NR_dup2:
5803 ret = get_errno(dup2(arg1, arg2));
5804 break;
5805 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5806 case TARGET_NR_dup3:
5807 ret = get_errno(dup3(arg1, arg2, arg3));
5808 break;
5809 #endif
5810 #ifdef TARGET_NR_getppid /* not on alpha */
5811 case TARGET_NR_getppid:
5812 ret = get_errno(getppid());
5813 break;
5814 #endif
5815 case TARGET_NR_getpgrp:
5816 ret = get_errno(getpgrp());
5817 break;
5818 case TARGET_NR_setsid:
5819 ret = get_errno(setsid());
5820 break;
5821 #ifdef TARGET_NR_sigaction
5822 case TARGET_NR_sigaction:
5823 {
5824 #if defined(TARGET_ALPHA)
5825 struct target_sigaction act, oact, *pact = 0;
5826 struct target_old_sigaction *old_act;
5827 if (arg2) {
5828 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5829 goto efault;
5830 act._sa_handler = old_act->_sa_handler;
5831 target_siginitset(&act.sa_mask, old_act->sa_mask);
5832 act.sa_flags = old_act->sa_flags;
5833 act.sa_restorer = 0;
5834 unlock_user_struct(old_act, arg2, 0);
5835 pact = &act;
5836 }
5837 ret = get_errno(do_sigaction(arg1, pact, &oact));
5838 if (!is_error(ret) && arg3) {
5839 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5840 goto efault;
5841 old_act->_sa_handler = oact._sa_handler;
5842 old_act->sa_mask = oact.sa_mask.sig[0];
5843 old_act->sa_flags = oact.sa_flags;
5844 unlock_user_struct(old_act, arg3, 1);
5845 }
5846 #elif defined(TARGET_MIPS)
5847 struct target_sigaction act, oact, *pact, *old_act;
5848
5849 if (arg2) {
5850 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5851 goto efault;
5852 act._sa_handler = old_act->_sa_handler;
5853 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5854 act.sa_flags = old_act->sa_flags;
5855 unlock_user_struct(old_act, arg2, 0);
5856 pact = &act;
5857 } else {
5858 pact = NULL;
5859 }
5860
5861 ret = get_errno(do_sigaction(arg1, pact, &oact));
5862
5863 if (!is_error(ret) && arg3) {
5864 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5865 goto efault;
5866 old_act->_sa_handler = oact._sa_handler;
5867 old_act->sa_flags = oact.sa_flags;
5868 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5869 old_act->sa_mask.sig[1] = 0;
5870 old_act->sa_mask.sig[2] = 0;
5871 old_act->sa_mask.sig[3] = 0;
5872 unlock_user_struct(old_act, arg3, 1);
5873 }
5874 #else
5875 struct target_old_sigaction *old_act;
5876 struct target_sigaction act, oact, *pact;
5877 if (arg2) {
5878 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5879 goto efault;
5880 act._sa_handler = old_act->_sa_handler;
5881 target_siginitset(&act.sa_mask, old_act->sa_mask);
5882 act.sa_flags = old_act->sa_flags;
5883 act.sa_restorer = old_act->sa_restorer;
5884 unlock_user_struct(old_act, arg2, 0);
5885 pact = &act;
5886 } else {
5887 pact = NULL;
5888 }
5889 ret = get_errno(do_sigaction(arg1, pact, &oact));
5890 if (!is_error(ret) && arg3) {
5891 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5892 goto efault;
5893 old_act->_sa_handler = oact._sa_handler;
5894 old_act->sa_mask = oact.sa_mask.sig[0];
5895 old_act->sa_flags = oact.sa_flags;
5896 old_act->sa_restorer = oact.sa_restorer;
5897 unlock_user_struct(old_act, arg3, 1);
5898 }
5899 #endif
5900 }
5901 break;
5902 #endif
5903 case TARGET_NR_rt_sigaction:
5904 {
5905 #if defined(TARGET_ALPHA)
5906 struct target_sigaction act, oact, *pact = 0;
5907 struct target_rt_sigaction *rt_act;
5908 /* ??? arg4 == sizeof(sigset_t). */
5909 if (arg2) {
5910 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5911 goto efault;
5912 act._sa_handler = rt_act->_sa_handler;
5913 act.sa_mask = rt_act->sa_mask;
5914 act.sa_flags = rt_act->sa_flags;
5915 act.sa_restorer = arg5;
5916 unlock_user_struct(rt_act, arg2, 0);
5917 pact = &act;
5918 }
5919 ret = get_errno(do_sigaction(arg1, pact, &oact));
5920 if (!is_error(ret) && arg3) {
5921 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5922 goto efault;
5923 rt_act->_sa_handler = oact._sa_handler;
5924 rt_act->sa_mask = oact.sa_mask;
5925 rt_act->sa_flags = oact.sa_flags;
5926 unlock_user_struct(rt_act, arg3, 1);
5927 }
5928 #else
5929 struct target_sigaction *act;
5930 struct target_sigaction *oact;
5931
5932 if (arg2) {
5933 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5934 goto efault;
5935 } else
5936 act = NULL;
5937 if (arg3) {
5938 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5939 ret = -TARGET_EFAULT;
5940 goto rt_sigaction_fail;
5941 }
5942 } else
5943 oact = NULL;
5944 ret = get_errno(do_sigaction(arg1, act, oact));
5945 rt_sigaction_fail:
5946 if (act)
5947 unlock_user_struct(act, arg2, 0);
5948 if (oact)
5949 unlock_user_struct(oact, arg3, 1);
5950 #endif
5951 }
5952 break;
5953 #ifdef TARGET_NR_sgetmask /* not on alpha */
5954 case TARGET_NR_sgetmask:
5955 {
5956 sigset_t cur_set;
5957 abi_ulong target_set;
5958 sigprocmask(0, NULL, &cur_set);
5959 host_to_target_old_sigset(&target_set, &cur_set);
5960 ret = target_set;
5961 }
5962 break;
5963 #endif
5964 #ifdef TARGET_NR_ssetmask /* not on alpha */
5965 case TARGET_NR_ssetmask:
5966 {
5967 sigset_t set, oset, cur_set;
5968 abi_ulong target_set = arg1;
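/* Note that the requested mask is OR'ed into the currently blocked
   set before being installed, so this path never unblocks a signal
   that is already blocked. */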
5969 sigprocmask(0, NULL, &cur_set);
5970 target_to_host_old_sigset(&set, &target_set);
5971 sigorset(&set, &set, &cur_set);
5972 sigprocmask(SIG_SETMASK, &set, &oset);
5973 host_to_target_old_sigset(&target_set, &oset);
5974 ret = target_set;
5975 }
5976 break;
5977 #endif
5978 #ifdef TARGET_NR_sigprocmask
5979 case TARGET_NR_sigprocmask:
5980 {
5981 #if defined(TARGET_ALPHA)
5982 sigset_t set, oldset;
5983 abi_ulong mask;
5984 int how;
5985
5986 switch (arg1) {
5987 case TARGET_SIG_BLOCK:
5988 how = SIG_BLOCK;
5989 break;
5990 case TARGET_SIG_UNBLOCK:
5991 how = SIG_UNBLOCK;
5992 break;
5993 case TARGET_SIG_SETMASK:
5994 how = SIG_SETMASK;
5995 break;
5996 default:
5997 ret = -TARGET_EINVAL;
5998 goto fail;
5999 }
6000 mask = arg2;
6001 target_to_host_old_sigset(&set, &mask);
6002
6003 ret = get_errno(sigprocmask(how, &set, &oldset));
6004 if (!is_error(ret)) {
6005 host_to_target_old_sigset(&mask, &oldset);
6006 ret = mask;
6007 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6008 }
6009 #else
6010 sigset_t set, oldset, *set_ptr;
6011 int how;
6012
6013 if (arg2) {
6014 switch (arg1) {
6015 case TARGET_SIG_BLOCK:
6016 how = SIG_BLOCK;
6017 break;
6018 case TARGET_SIG_UNBLOCK:
6019 how = SIG_UNBLOCK;
6020 break;
6021 case TARGET_SIG_SETMASK:
6022 how = SIG_SETMASK;
6023 break;
6024 default:
6025 ret = -TARGET_EINVAL;
6026 goto fail;
6027 }
6028 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6029 goto efault;
6030 target_to_host_old_sigset(&set, p);
6031 unlock_user(p, arg2, 0);
6032 set_ptr = &set;
6033 } else {
6034 how = 0;
6035 set_ptr = NULL;
6036 }
6037 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6038 if (!is_error(ret) && arg3) {
6039 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6040 goto efault;
6041 host_to_target_old_sigset(p, &oldset);
6042 unlock_user(p, arg3, sizeof(target_sigset_t));
6043 }
6044 #endif
6045 }
6046 break;
6047 #endif
6048 case TARGET_NR_rt_sigprocmask:
6049 {
6050 int how = arg1;
6051 sigset_t set, oldset, *set_ptr;
6052
6053 if (arg2) {
6054 switch(how) {
6055 case TARGET_SIG_BLOCK:
6056 how = SIG_BLOCK;
6057 break;
6058 case TARGET_SIG_UNBLOCK:
6059 how = SIG_UNBLOCK;
6060 break;
6061 case TARGET_SIG_SETMASK:
6062 how = SIG_SETMASK;
6063 break;
6064 default:
6065 ret = -TARGET_EINVAL;
6066 goto fail;
6067 }
6068 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6069 goto efault;
6070 target_to_host_sigset(&set, p);
6071 unlock_user(p, arg2, 0);
6072 set_ptr = &set;
6073 } else {
6074 how = 0;
6075 set_ptr = NULL;
6076 }
6077 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6078 if (!is_error(ret) && arg3) {
6079 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6080 goto efault;
6081 host_to_target_sigset(p, &oldset);
6082 unlock_user(p, arg3, sizeof(target_sigset_t));
6083 }
6084 }
6085 break;
6086 #ifdef TARGET_NR_sigpending
6087 case TARGET_NR_sigpending:
6088 {
6089 sigset_t set;
6090 ret = get_errno(sigpending(&set));
6091 if (!is_error(ret)) {
6092 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6093 goto efault;
6094 host_to_target_old_sigset(p, &set);
6095 unlock_user(p, arg1, sizeof(target_sigset_t));
6096 }
6097 }
6098 break;
6099 #endif
6100 case TARGET_NR_rt_sigpending:
6101 {
6102 sigset_t set;
6103 ret = get_errno(sigpending(&set));
6104 if (!is_error(ret)) {
6105 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6106 goto efault;
6107 host_to_target_sigset(p, &set);
6108 unlock_user(p, arg1, sizeof(target_sigset_t));
6109 }
6110 }
6111 break;
6112 #ifdef TARGET_NR_sigsuspend
6113 case TARGET_NR_sigsuspend:
6114 {
6115 sigset_t set;
6116 #if defined(TARGET_ALPHA)
6117 abi_ulong mask = arg1;
6118 target_to_host_old_sigset(&set, &mask);
6119 #else
6120 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6121 goto efault;
6122 target_to_host_old_sigset(&set, p);
6123 unlock_user(p, arg1, 0);
6124 #endif
6125 ret = get_errno(sigsuspend(&set));
6126 }
6127 break;
6128 #endif
6129 case TARGET_NR_rt_sigsuspend:
6130 {
6131 sigset_t set;
6132 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6133 goto efault;
6134 target_to_host_sigset(&set, p);
6135 unlock_user(p, arg1, 0);
6136 ret = get_errno(sigsuspend(&set));
6137 }
6138 break;
6139 case TARGET_NR_rt_sigtimedwait:
6140 {
6141 sigset_t set;
6142 struct timespec uts, *puts;
6143 siginfo_t uinfo;
6144
6145 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6146 goto efault;
6147 target_to_host_sigset(&set, p);
6148 unlock_user(p, arg1, 0);
6149 if (arg3) {
6150 puts = &uts;
6151 target_to_host_timespec(puts, arg3);
6152 } else {
6153 puts = NULL;
6154 }
6155 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6156 if (!is_error(ret) && arg2) {
6157 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6158 goto efault;
6159 host_to_target_siginfo(p, &uinfo);
6160 unlock_user(p, arg2, sizeof(target_siginfo_t));
6161 }
6162 }
6163 break;
6164 case TARGET_NR_rt_sigqueueinfo:
6165 {
6166 siginfo_t uinfo;
6167 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6168 goto efault;
6169 target_to_host_siginfo(&uinfo, p);
6170 unlock_user(p, arg3, 0);
6171 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6172 }
6173 break;
6174 #ifdef TARGET_NR_sigreturn
6175 case TARGET_NR_sigreturn:
6176 /* NOTE: ret is eax, so no transcoding needs to be done */
6177 ret = do_sigreturn(cpu_env);
6178 break;
6179 #endif
6180 case TARGET_NR_rt_sigreturn:
6181 /* NOTE: ret is eax, so no transcoding needs to be done */
6182 ret = do_rt_sigreturn(cpu_env);
6183 break;
6184 case TARGET_NR_sethostname:
6185 if (!(p = lock_user_string(arg1)))
6186 goto efault;
6187 ret = get_errno(sethostname(p, arg2));
6188 unlock_user(p, arg1, 0);
6189 break;
6190 case TARGET_NR_setrlimit:
6191 {
6192 int resource = target_to_host_resource(arg1);
6193 struct target_rlimit *target_rlim;
6194 struct rlimit rlim;
6195 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6196 goto efault;
6197 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6198 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6199 unlock_user_struct(target_rlim, arg2, 0);
6200 ret = get_errno(setrlimit(resource, &rlim));
6201 }
6202 break;
6203 case TARGET_NR_getrlimit:
6204 {
6205 int resource = target_to_host_resource(arg1);
6206 struct target_rlimit *target_rlim;
6207 struct rlimit rlim;
6208
6209 ret = get_errno(getrlimit(resource, &rlim));
6210 if (!is_error(ret)) {
6211 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6212 goto efault;
6213 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6214 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6215 unlock_user_struct(target_rlim, arg2, 1);
6216 }
6217 }
6218 break;
6219 case TARGET_NR_getrusage:
6220 {
6221 struct rusage rusage;
6222 ret = get_errno(getrusage(arg1, &rusage));
6223 if (!is_error(ret)) {
6224 host_to_target_rusage(arg2, &rusage);
6225 }
6226 }
6227 break;
6228 case TARGET_NR_gettimeofday:
6229 {
6230 struct timeval tv;
6231 ret = get_errno(gettimeofday(&tv, NULL));
6232 if (!is_error(ret)) {
6233 if (copy_to_user_timeval(arg1, &tv))
6234 goto efault;
6235 }
6236 }
6237 break;
6238 case TARGET_NR_settimeofday:
6239 {
6240 struct timeval tv;
6241 if (copy_from_user_timeval(&tv, arg1))
6242 goto efault;
6243 ret = get_errno(settimeofday(&tv, NULL));
6244 }
6245 break;
6246 #if defined(TARGET_NR_select)
6247 case TARGET_NR_select:
6248 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6249 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6250 #else
6251 {
6252 struct target_sel_arg_struct *sel;
6253 abi_ulong inp, outp, exp, tvp;
6254 long nsel;
6255
6256 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6257 goto efault;
6258 nsel = tswapal(sel->n);
6259 inp = tswapal(sel->inp);
6260 outp = tswapal(sel->outp);
6261 exp = tswapal(sel->exp);
6262 tvp = tswapal(sel->tvp);
6263 unlock_user_struct(sel, arg1, 0);
6264 ret = do_select(nsel, inp, outp, exp, tvp);
6265 }
6266 #endif
6267 break;
6268 #endif
6269 #ifdef TARGET_NR_pselect6
6270 case TARGET_NR_pselect6:
6271 {
6272 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6273 fd_set rfds, wfds, efds;
6274 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6275 struct timespec ts, *ts_ptr;
6276
6277 /*
6278 * The 6th arg is actually two args smashed together,
6279 * so we cannot use the C library.
6280 */
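/* The guest's 6th argument points to a pair
   { const sigset_t *ss; size_t ss_len; } packed as two abi_ulongs;
   it is unpacked below into arg_sigset and arg_sigsize. */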
6281 sigset_t set;
6282 struct {
6283 sigset_t *set;
6284 size_t size;
6285 } sig, *sig_ptr;
6286
6287 abi_ulong arg_sigset, arg_sigsize, *arg7;
6288 target_sigset_t *target_sigset;
6289
6290 n = arg1;
6291 rfd_addr = arg2;
6292 wfd_addr = arg3;
6293 efd_addr = arg4;
6294 ts_addr = arg5;
6295
6296 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6297 if (ret) {
6298 goto fail;
6299 }
6300 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6301 if (ret) {
6302 goto fail;
6303 }
6304 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6305 if (ret) {
6306 goto fail;
6307 }
6308
6309 /*
6310 * This takes a timespec, and not a timeval, so we cannot
6311 * use the do_select() helper ...
6312 */
6313 if (ts_addr) {
6314 if (target_to_host_timespec(&ts, ts_addr)) {
6315 goto efault;
6316 }
6317 ts_ptr = &ts;
6318 } else {
6319 ts_ptr = NULL;
6320 }
6321
6322 /* Extract the two packed args for the sigset */
6323 if (arg6) {
6324 sig_ptr = &sig;
6325 sig.size = _NSIG / 8;
6326
6327 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6328 if (!arg7) {
6329 goto efault;
6330 }
6331 arg_sigset = tswapal(arg7[0]);
6332 arg_sigsize = tswapal(arg7[1]);
6333 unlock_user(arg7, arg6, 0);
6334
6335 if (arg_sigset) {
6336 sig.set = &set;
6337 if (arg_sigsize != sizeof(*target_sigset)) {
6338 /* Like the kernel, we enforce correct size sigsets */
6339 ret = -TARGET_EINVAL;
6340 goto fail;
6341 }
6342 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6343 sizeof(*target_sigset), 1);
6344 if (!target_sigset) {
6345 goto efault;
6346 }
6347 target_to_host_sigset(&set, target_sigset);
6348 unlock_user(target_sigset, arg_sigset, 0);
6349 } else {
6350 sig.set = NULL;
6351 }
6352 } else {
6353 sig_ptr = NULL;
6354 }
6355
6356 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6357 ts_ptr, sig_ptr));
6358
6359 if (!is_error(ret)) {
6360 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6361 goto efault;
6362 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6363 goto efault;
6364 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6365 goto efault;
6366
6367 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6368 goto efault;
6369 }
6370 }
6371 break;
6372 #endif
6373 case TARGET_NR_symlink:
6374 {
6375 void *p2;
6376 p = lock_user_string(arg1);
6377 p2 = lock_user_string(arg2);
6378 if (!p || !p2)
6379 ret = -TARGET_EFAULT;
6380 else
6381 ret = get_errno(symlink(p, p2));
6382 unlock_user(p2, arg2, 0);
6383 unlock_user(p, arg1, 0);
6384 }
6385 break;
6386 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6387 case TARGET_NR_symlinkat:
6388 {
6389 void *p2;
6390 p = lock_user_string(arg1);
6391 p2 = lock_user_string(arg3);
6392 if (!p || !p2)
6393 ret = -TARGET_EFAULT;
6394 else
6395 ret = get_errno(sys_symlinkat(p, arg2, p2));
6396 unlock_user(p2, arg3, 0);
6397 unlock_user(p, arg1, 0);
6398 }
6399 break;
6400 #endif
6401 #ifdef TARGET_NR_oldlstat
6402 case TARGET_NR_oldlstat:
6403 goto unimplemented;
6404 #endif
6405 case TARGET_NR_readlink:
6406 {
6407 void *p2, *temp;
6408 p = lock_user_string(arg1);
6409 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6410 if (!p || !p2)
6411 ret = -TARGET_EFAULT;
6412 else {
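/* Intercept readlink("/proc/self/exe") so the guest sees the path of
   the emulated binary (exec_path) rather than that of QEMU itself. */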
6413 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
6414 char real[PATH_MAX];
6415 temp = realpath(exec_path, real);
6416 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
6417 snprintf((char *)p2, arg3, "%s", real);
6418 }
6419 else
6420 ret = get_errno(readlink(path(p), p2, arg3));
6421 }
6422 unlock_user(p2, arg2, ret);
6423 unlock_user(p, arg1, 0);
6424 }
6425 break;
6426 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6427 case TARGET_NR_readlinkat:
6428 {
6429 void *p2;
6430 p = lock_user_string(arg2);
6431 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6432 if (!p || !p2)
6433 ret = -TARGET_EFAULT;
6434 else
6435 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6436 unlock_user(p2, arg3, ret);
6437 unlock_user(p, arg2, 0);
6438 }
6439 break;
6440 #endif
6441 #ifdef TARGET_NR_uselib
6442 case TARGET_NR_uselib:
6443 goto unimplemented;
6444 #endif
6445 #ifdef TARGET_NR_swapon
6446 case TARGET_NR_swapon:
6447 if (!(p = lock_user_string(arg1)))
6448 goto efault;
6449 ret = get_errno(swapon(p, arg2));
6450 unlock_user(p, arg1, 0);
6451 break;
6452 #endif
6453 case TARGET_NR_reboot:
6454 if (!(p = lock_user_string(arg4)))
6455 goto efault;
6456 ret = reboot(arg1, arg2, arg3, p);
6457 unlock_user(p, arg4, 0);
6458 break;
6459 #ifdef TARGET_NR_readdir
6460 case TARGET_NR_readdir:
6461 goto unimplemented;
6462 #endif
6463 #ifdef TARGET_NR_mmap
6464 case TARGET_NR_mmap:
6465 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6466 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6467 || defined(TARGET_S390X)
6468 {
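/* On these targets the old-style mmap takes a single pointer (arg1)
   to a block of six abi_ulong arguments, which is unpacked here. */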
6469 abi_ulong *v;
6470 abi_ulong v1, v2, v3, v4, v5, v6;
6471 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6472 goto efault;
6473 v1 = tswapal(v[0]);
6474 v2 = tswapal(v[1]);
6475 v3 = tswapal(v[2]);
6476 v4 = tswapal(v[3]);
6477 v5 = tswapal(v[4]);
6478 v6 = tswapal(v[5]);
6479 unlock_user(v, arg1, 0);
6480 ret = get_errno(target_mmap(v1, v2, v3,
6481 target_to_host_bitmask(v4, mmap_flags_tbl),
6482 v5, v6));
6483 }
6484 #else
6485 ret = get_errno(target_mmap(arg1, arg2, arg3,
6486 target_to_host_bitmask(arg4, mmap_flags_tbl),
6487 arg5,
6488 arg6));
6489 #endif
6490 break;
6491 #endif
6492 #ifdef TARGET_NR_mmap2
6493 case TARGET_NR_mmap2:
6494 #ifndef MMAP_SHIFT
6495 #define MMAP_SHIFT 12
6496 #endif
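/* The mmap2 offset (arg6) is given in units of 1 << MMAP_SHIFT
   (4096 bytes by default), hence the shift to convert it to a byte
   offset for target_mmap(). */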
6497 ret = get_errno(target_mmap(arg1, arg2, arg3,
6498 target_to_host_bitmask(arg4, mmap_flags_tbl),
6499 arg5,
6500 arg6 << MMAP_SHIFT));
6501 break;
6502 #endif
6503 case TARGET_NR_munmap:
6504 ret = get_errno(target_munmap(arg1, arg2));
6505 break;
6506 case TARGET_NR_mprotect:
6507 {
6508 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6509 /* Special hack to detect libc making the stack executable. */
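/* If the guest asks for PROT_GROWSDOWN on an address inside its
   stack, drop the flag and widen the range so that it starts at
   stack_limit, presumably because target_mprotect() does not
   implement PROT_GROWSDOWN semantics itself. */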
6510 if ((arg3 & PROT_GROWSDOWN)
6511 && arg1 >= ts->info->stack_limit
6512 && arg1 <= ts->info->start_stack) {
6513 arg3 &= ~PROT_GROWSDOWN;
6514 arg2 = arg2 + arg1 - ts->info->stack_limit;
6515 arg1 = ts->info->stack_limit;
6516 }
6517 }
6518 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6519 break;
6520 #ifdef TARGET_NR_mremap
6521 case TARGET_NR_mremap:
6522 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6523 break;
6524 #endif
6525 /* ??? msync/mlock/munlock are broken for softmmu. */
6526 #ifdef TARGET_NR_msync
6527 case TARGET_NR_msync:
6528 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6529 break;
6530 #endif
6531 #ifdef TARGET_NR_mlock
6532 case TARGET_NR_mlock:
6533 ret = get_errno(mlock(g2h(arg1), arg2));
6534 break;
6535 #endif
6536 #ifdef TARGET_NR_munlock
6537 case TARGET_NR_munlock:
6538 ret = get_errno(munlock(g2h(arg1), arg2));
6539 break;
6540 #endif
6541 #ifdef TARGET_NR_mlockall
6542 case TARGET_NR_mlockall:
6543 ret = get_errno(mlockall(arg1));
6544 break;
6545 #endif
6546 #ifdef TARGET_NR_munlockall
6547 case TARGET_NR_munlockall:
6548 ret = get_errno(munlockall());
6549 break;
6550 #endif
6551 case TARGET_NR_truncate:
6552 if (!(p = lock_user_string(arg1)))
6553 goto efault;
6554 ret = get_errno(truncate(p, arg2));
6555 unlock_user(p, arg1, 0);
6556 break;
6557 case TARGET_NR_ftruncate:
6558 ret = get_errno(ftruncate(arg1, arg2));
6559 break;
6560 case TARGET_NR_fchmod:
6561 ret = get_errno(fchmod(arg1, arg2));
6562 break;
6563 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6564 case TARGET_NR_fchmodat:
6565 if (!(p = lock_user_string(arg2)))
6566 goto efault;
6567 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6568 unlock_user(p, arg2, 0);
6569 break;
6570 #endif
6571 case TARGET_NR_getpriority:
6572 /* Note that negative values are valid for getpriority, so we must
6573 differentiate based on errno settings. */
6574 errno = 0;
6575 ret = getpriority(arg1, arg2);
6576 if (ret == -1 && errno != 0) {
6577 ret = -host_to_target_errno(errno);
6578 break;
6579 }
6580 #ifdef TARGET_ALPHA
6581 /* Return value is the unbiased priority. Signal no error. */
6582 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6583 #else
6584 /* Return value is a biased priority to avoid negative numbers. */
6585 ret = 20 - ret;
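/* This matches the kernel's sys_getpriority(), which returns
   20 - nice, so nice values -20..19 map to 40..1. */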
6586 #endif
6587 break;
6588 case TARGET_NR_setpriority:
6589 ret = get_errno(setpriority(arg1, arg2, arg3));
6590 break;
6591 #ifdef TARGET_NR_profil
6592 case TARGET_NR_profil:
6593 goto unimplemented;
6594 #endif
6595 case TARGET_NR_statfs:
6596 if (!(p = lock_user_string(arg1)))
6597 goto efault;
6598 ret = get_errno(statfs(path(p), &stfs));
6599 unlock_user(p, arg1, 0);
6600 convert_statfs:
6601 if (!is_error(ret)) {
6602 struct target_statfs *target_stfs;
6603
6604 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6605 goto efault;
6606 __put_user(stfs.f_type, &target_stfs->f_type);
6607 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6608 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6609 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6610 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6611 __put_user(stfs.f_files, &target_stfs->f_files);
6612 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6613 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6614 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6615 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6616 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6617 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6618 unlock_user_struct(target_stfs, arg2, 1);
6619 }
6620 break;
6621 case TARGET_NR_fstatfs:
6622 ret = get_errno(fstatfs(arg1, &stfs));
6623 goto convert_statfs;
6624 #ifdef TARGET_NR_statfs64
6625 case TARGET_NR_statfs64:
6626 if (!(p = lock_user_string(arg1)))
6627 goto efault;
6628 ret = get_errno(statfs(path(p), &stfs));
6629 unlock_user(p, arg1, 0);
6630 convert_statfs64:
6631 if (!is_error(ret)) {
6632 struct target_statfs64 *target_stfs;
6633
6634 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6635 goto efault;
6636 __put_user(stfs.f_type, &target_stfs->f_type);
6637 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6638 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6639 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6640 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6641 __put_user(stfs.f_files, &target_stfs->f_files);
6642 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6643 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6644 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6645 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6646 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6647 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6648 unlock_user_struct(target_stfs, arg3, 1);
6649 }
6650 break;
6651 case TARGET_NR_fstatfs64:
6652 ret = get_errno(fstatfs(arg1, &stfs));
6653 goto convert_statfs64;
6654 #endif
6655 #ifdef TARGET_NR_ioperm
6656 case TARGET_NR_ioperm:
6657 goto unimplemented;
6658 #endif
6659 #ifdef TARGET_NR_socketcall
6660 case TARGET_NR_socketcall:
6661 ret = do_socketcall(arg1, arg2);
6662 break;
6663 #endif
6664 #ifdef TARGET_NR_accept
6665 case TARGET_NR_accept:
6666 ret = do_accept(arg1, arg2, arg3);
6667 break;
6668 #endif
6669 #ifdef TARGET_NR_bind
6670 case TARGET_NR_bind:
6671 ret = do_bind(arg1, arg2, arg3);
6672 break;
6673 #endif
6674 #ifdef TARGET_NR_connect
6675 case TARGET_NR_connect:
6676 ret = do_connect(arg1, arg2, arg3);
6677 break;
6678 #endif
6679 #ifdef TARGET_NR_getpeername
6680 case TARGET_NR_getpeername:
6681 ret = do_getpeername(arg1, arg2, arg3);
6682 break;
6683 #endif
6684 #ifdef TARGET_NR_getsockname
6685 case TARGET_NR_getsockname:
6686 ret = do_getsockname(arg1, arg2, arg3);
6687 break;
6688 #endif
6689 #ifdef TARGET_NR_getsockopt
6690 case TARGET_NR_getsockopt:
6691 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6692 break;
6693 #endif
6694 #ifdef TARGET_NR_listen
6695 case TARGET_NR_listen:
6696 ret = get_errno(listen(arg1, arg2));
6697 break;
6698 #endif
6699 #ifdef TARGET_NR_recv
6700 case TARGET_NR_recv:
6701 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6702 break;
6703 #endif
6704 #ifdef TARGET_NR_recvfrom
6705 case TARGET_NR_recvfrom:
6706 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6707 break;
6708 #endif
6709 #ifdef TARGET_NR_recvmsg
6710 case TARGET_NR_recvmsg:
6711 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6712 break;
6713 #endif
6714 #ifdef TARGET_NR_send
6715 case TARGET_NR_send:
6716 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6717 break;
6718 #endif
6719 #ifdef TARGET_NR_sendmsg
6720 case TARGET_NR_sendmsg:
6721 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6722 break;
6723 #endif
6724 #ifdef TARGET_NR_sendto
6725 case TARGET_NR_sendto:
6726 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6727 break;
6728 #endif
6729 #ifdef TARGET_NR_shutdown
6730 case TARGET_NR_shutdown:
6731 ret = get_errno(shutdown(arg1, arg2));
6732 break;
6733 #endif
6734 #ifdef TARGET_NR_socket
6735 case TARGET_NR_socket:
6736 ret = do_socket(arg1, arg2, arg3);
6737 break;
6738 #endif
6739 #ifdef TARGET_NR_socketpair
6740 case TARGET_NR_socketpair:
6741 ret = do_socketpair(arg1, arg2, arg3, arg4);
6742 break;
6743 #endif
6744 #ifdef TARGET_NR_setsockopt
6745 case TARGET_NR_setsockopt:
6746 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6747 break;
6748 #endif
6749
6750 case TARGET_NR_syslog:
6751 if (!(p = lock_user_string(arg2)))
6752 goto efault;
6753 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6754 unlock_user(p, arg2, 0);
6755 break;
6756
6757 case TARGET_NR_setitimer:
6758 {
6759 struct itimerval value, ovalue, *pvalue;
6760
6761 if (arg2) {
6762 pvalue = &value;
6763 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6764 || copy_from_user_timeval(&pvalue->it_value,
6765 arg2 + sizeof(struct target_timeval)))
6766 goto efault;
6767 } else {
6768 pvalue = NULL;
6769 }
6770 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6771 if (!is_error(ret) && arg3) {
6772 if (copy_to_user_timeval(arg3,
6773 &ovalue.it_interval)
6774 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6775 &ovalue.it_value))
6776 goto efault;
6777 }
6778 }
6779 break;
6780 case TARGET_NR_getitimer:
6781 {
6782 struct itimerval value;
6783
6784 ret = get_errno(getitimer(arg1, &value));
6785 if (!is_error(ret) && arg2) {
6786 if (copy_to_user_timeval(arg2,
6787 &value.it_interval)
6788 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6789 &value.it_value))
6790 goto efault;
6791 }
6792 }
6793 break;
6794 case TARGET_NR_stat:
6795 if (!(p = lock_user_string(arg1)))
6796 goto efault;
6797 ret = get_errno(stat(path(p), &st));
6798 unlock_user(p, arg1, 0);
6799 goto do_stat;
6800 case TARGET_NR_lstat:
6801 if (!(p = lock_user_string(arg1)))
6802 goto efault;
6803 ret = get_errno(lstat(path(p), &st));
6804 unlock_user(p, arg1, 0);
6805 goto do_stat;
6806 case TARGET_NR_fstat:
6807 {
6808 ret = get_errno(fstat(arg1, &st));
6809 do_stat:
6810 if (!is_error(ret)) {
6811 struct target_stat *target_st;
6812
6813 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6814 goto efault;
6815 memset(target_st, 0, sizeof(*target_st));
6816 __put_user(st.st_dev, &target_st->st_dev);
6817 __put_user(st.st_ino, &target_st->st_ino);
6818 __put_user(st.st_mode, &target_st->st_mode);
6819 __put_user(st.st_uid, &target_st->st_uid);
6820 __put_user(st.st_gid, &target_st->st_gid);
6821 __put_user(st.st_nlink, &target_st->st_nlink);
6822 __put_user(st.st_rdev, &target_st->st_rdev);
6823 __put_user(st.st_size, &target_st->st_size);
6824 __put_user(st.st_blksize, &target_st->st_blksize);
6825 __put_user(st.st_blocks, &target_st->st_blocks);
6826 __put_user(st.st_atime, &target_st->target_st_atime);
6827 __put_user(st.st_mtime, &target_st->target_st_mtime);
6828 __put_user(st.st_ctime, &target_st->target_st_ctime);
6829 unlock_user_struct(target_st, arg2, 1);
6830 }
6831 }
6832 break;
6833 #ifdef TARGET_NR_olduname
6834 case TARGET_NR_olduname:
6835 goto unimplemented;
6836 #endif
6837 #ifdef TARGET_NR_iopl
6838 case TARGET_NR_iopl:
6839 goto unimplemented;
6840 #endif
6841 case TARGET_NR_vhangup:
6842 ret = get_errno(vhangup());
6843 break;
6844 #ifdef TARGET_NR_idle
6845 case TARGET_NR_idle:
6846 goto unimplemented;
6847 #endif
6848 #ifdef TARGET_NR_syscall
6849 case TARGET_NR_syscall:
6850 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6851 arg6, arg7, arg8, 0);
6852 break;
6853 #endif
6854 case TARGET_NR_wait4:
6855 {
6856 int status;
6857 abi_long status_ptr = arg2;
6858 struct rusage rusage, *rusage_ptr;
6859 abi_ulong target_rusage = arg4;
6860 if (target_rusage)
6861 rusage_ptr = &rusage;
6862 else
6863 rusage_ptr = NULL;
6864 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6865 if (!is_error(ret)) {
6866 if (status_ptr && ret) {
6867 status = host_to_target_waitstatus(status);
6868 if (put_user_s32(status, status_ptr))
6869 goto efault;
6870 }
6871 if (target_rusage)
6872 host_to_target_rusage(target_rusage, &rusage);
6873 }
6874 }
6875 break;
6876 #ifdef TARGET_NR_swapoff
6877 case TARGET_NR_swapoff:
6878 if (!(p = lock_user_string(arg1)))
6879 goto efault;
6880 ret = get_errno(swapoff(p));
6881 unlock_user(p, arg1, 0);
6882 break;
6883 #endif
6884 case TARGET_NR_sysinfo:
6885 {
6886 struct target_sysinfo *target_value;
6887 struct sysinfo value;
6888 ret = get_errno(sysinfo(&value));
6889 if (!is_error(ret) && arg1)
6890 {
6891 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6892 goto efault;
6893 __put_user(value.uptime, &target_value->uptime);
6894 __put_user(value.loads[0], &target_value->loads[0]);
6895 __put_user(value.loads[1], &target_value->loads[1]);
6896 __put_user(value.loads[2], &target_value->loads[2]);
6897 __put_user(value.totalram, &target_value->totalram);
6898 __put_user(value.freeram, &target_value->freeram);
6899 __put_user(value.sharedram, &target_value->sharedram);
6900 __put_user(value.bufferram, &target_value->bufferram);
6901 __put_user(value.totalswap, &target_value->totalswap);
6902 __put_user(value.freeswap, &target_value->freeswap);
6903 __put_user(value.procs, &target_value->procs);
6904 __put_user(value.totalhigh, &target_value->totalhigh);
6905 __put_user(value.freehigh, &target_value->freehigh);
6906 __put_user(value.mem_unit, &target_value->mem_unit);
6907 unlock_user_struct(target_value, arg1, 1);
6908 }
6909 }
6910 break;
6911 #ifdef TARGET_NR_ipc
6912 case TARGET_NR_ipc:
6913 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6914 break;
6915 #endif
6916 #ifdef TARGET_NR_semget
6917 case TARGET_NR_semget:
6918 ret = get_errno(semget(arg1, arg2, arg3));
6919 break;
6920 #endif
6921 #ifdef TARGET_NR_semop
6922 case TARGET_NR_semop:
6923 ret = get_errno(do_semop(arg1, arg2, arg3));
6924 break;
6925 #endif
6926 #ifdef TARGET_NR_semctl
6927 case TARGET_NR_semctl:
6928 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6929 break;
6930 #endif
6931 #ifdef TARGET_NR_msgctl
6932 case TARGET_NR_msgctl:
6933 ret = do_msgctl(arg1, arg2, arg3);
6934 break;
6935 #endif
6936 #ifdef TARGET_NR_msgget
6937 case TARGET_NR_msgget:
6938 ret = get_errno(msgget(arg1, arg2));
6939 break;
6940 #endif
6941 #ifdef TARGET_NR_msgrcv
6942 case TARGET_NR_msgrcv:
6943 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6944 break;
6945 #endif
6946 #ifdef TARGET_NR_msgsnd
6947 case TARGET_NR_msgsnd:
6948 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6949 break;
6950 #endif
6951 #ifdef TARGET_NR_shmget
6952 case TARGET_NR_shmget:
6953 ret = get_errno(shmget(arg1, arg2, arg3));
6954 break;
6955 #endif
6956 #ifdef TARGET_NR_shmctl
6957 case TARGET_NR_shmctl:
6958 ret = do_shmctl(arg1, arg2, arg3);
6959 break;
6960 #endif
6961 #ifdef TARGET_NR_shmat
6962 case TARGET_NR_shmat:
6963 ret = do_shmat(arg1, arg2, arg3);
6964 break;
6965 #endif
6966 #ifdef TARGET_NR_shmdt
6967 case TARGET_NR_shmdt:
6968 ret = do_shmdt(arg1);
6969 break;
6970 #endif
6971 case TARGET_NR_fsync:
6972 ret = get_errno(fsync(arg1));
6973 break;
6974 case TARGET_NR_clone:
6975 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6976 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6977 #elif defined(TARGET_CRIS)
6978 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6979 #elif defined(TARGET_MICROBLAZE)
6980 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6981 #elif defined(TARGET_S390X)
6982 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6983 #else
6984 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6985 #endif
6986 break;
6987 #ifdef __NR_exit_group
6988 /* new thread calls */
6989 case TARGET_NR_exit_group:
6990 #ifdef TARGET_GPROF
6991 _mcleanup();
6992 #endif
6993 gdb_exit(cpu_env, arg1);
6994 ret = get_errno(exit_group(arg1));
6995 break;
6996 #endif
6997 case TARGET_NR_setdomainname:
6998 if (!(p = lock_user_string(arg1)))
6999 goto efault;
7000 ret = get_errno(setdomainname(p, arg2));
7001 unlock_user(p, arg1, 0);
7002 break;
7003 case TARGET_NR_uname:
7004 /* no need to transcode because we use the linux syscall */
7005 {
7006 struct new_utsname * buf;
7007
7008 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7009 goto efault;
7010 ret = get_errno(sys_uname(buf));
7011 if (!is_error(ret)) {
7012 /* Overwrite the native machine name with whatever is being
7013 emulated. */
7014 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7015 /* Allow the user to override the reported release. */
7016 if (qemu_uname_release && *qemu_uname_release)
7017 strcpy (buf->release, qemu_uname_release);
7018 }
7019 unlock_user_struct(buf, arg1, 1);
7020 }
7021 break;
7022 #ifdef TARGET_I386
7023 case TARGET_NR_modify_ldt:
7024 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7025 break;
7026 #if !defined(TARGET_X86_64)
7027 case TARGET_NR_vm86old:
7028 goto unimplemented;
7029 case TARGET_NR_vm86:
7030 ret = do_vm86(cpu_env, arg1, arg2);
7031 break;
7032 #endif
7033 #endif
7034 case TARGET_NR_adjtimex:
7035 goto unimplemented;
7036 #ifdef TARGET_NR_create_module
7037 case TARGET_NR_create_module:
7038 #endif
7039 case TARGET_NR_init_module:
7040 case TARGET_NR_delete_module:
7041 #ifdef TARGET_NR_get_kernel_syms
7042 case TARGET_NR_get_kernel_syms:
7043 #endif
7044 goto unimplemented;
7045 case TARGET_NR_quotactl:
7046 goto unimplemented;
7047 case TARGET_NR_getpgid:
7048 ret = get_errno(getpgid(arg1));
7049 break;
7050 case TARGET_NR_fchdir:
7051 ret = get_errno(fchdir(arg1));
7052 break;
7053 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7054 case TARGET_NR_bdflush:
7055 goto unimplemented;
7056 #endif
7057 #ifdef TARGET_NR_sysfs
7058 case TARGET_NR_sysfs:
7059 goto unimplemented;
7060 #endif
7061 case TARGET_NR_personality:
7062 ret = get_errno(personality(arg1));
7063 break;
7064 #ifdef TARGET_NR_afs_syscall
7065 case TARGET_NR_afs_syscall:
7066 goto unimplemented;
7067 #endif
7068 #ifdef TARGET_NR__llseek /* Not on alpha */
7069 case TARGET_NR__llseek:
7070 {
7071 int64_t res;
7072 #if !defined(__NR_llseek)
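/* The host has no llseek syscall (typically a 64-bit host): build
   the 64-bit offset from the high/low halves and use plain lseek. */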
7073 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7074 if (res == -1) {
7075 ret = get_errno(res);
7076 } else {
7077 ret = 0;
7078 }
7079 #else
7080 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7081 #endif
7082 if ((ret == 0) && put_user_s64(res, arg4)) {
7083 goto efault;
7084 }
7085 }
7086 break;
7087 #endif
7088 case TARGET_NR_getdents:
7089 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7090 {
7091 struct target_dirent *target_dirp;
7092 struct linux_dirent *dirp;
7093 abi_long count = arg3;
7094
7095 dirp = malloc(count);
7096 if (!dirp) {
7097 ret = -TARGET_ENOMEM;
7098 goto fail;
7099 }
7100
7101 ret = get_errno(sys_getdents(arg1, dirp, count));
7102 if (!is_error(ret)) {
7103 struct linux_dirent *de;
7104 struct target_dirent *tde;
7105 int len = ret;
7106 int reclen, treclen;
7107 int count1, tnamelen;
7108
7109 count1 = 0;
7110 de = dirp;
7111 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7112 goto efault;
7113 tde = target_dirp;
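/* Repack each host linux_dirent (64-bit d_ino/d_off) as a 32-bit
   target_dirent: the record header shrinks, so d_reclen is
   recomputed and the remaining fields are byte-swapped. */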
7114 while (len > 0) {
7115 reclen = de->d_reclen;
7116 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7117 assert(tnamelen >= 0);
7118 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7119 assert(count1 + treclen <= count);
7120 tde->d_reclen = tswap16(treclen);
7121 tde->d_ino = tswapal(de->d_ino);
7122 tde->d_off = tswapal(de->d_off);
7123 memcpy(tde->d_name, de->d_name, tnamelen);
7124 de = (struct linux_dirent *)((char *)de + reclen);
7125 len -= reclen;
7126 tde = (struct target_dirent *)((char *)tde + treclen);
7127 count1 += treclen;
7128 }
7129 ret = count1;
7130 unlock_user(target_dirp, arg2, ret);
7131 }
7132 free(dirp);
7133 }
7134 #else
7135 {
7136 struct linux_dirent *dirp;
7137 abi_long count = arg3;
7138
7139 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7140 goto efault;
7141 ret = get_errno(sys_getdents(arg1, dirp, count));
7142 if (!is_error(ret)) {
7143 struct linux_dirent *de;
7144 int len = ret;
7145 int reclen;
7146 de = dirp;
7147 while (len > 0) {
7148 reclen = de->d_reclen;
7149 if (reclen > len)
7150 break;
7151 de->d_reclen = tswap16(reclen);
7152 tswapls(&de->d_ino);
7153 tswapls(&de->d_off);
7154 de = (struct linux_dirent *)((char *)de + reclen);
7155 len -= reclen;
7156 }
7157 }
7158 unlock_user(dirp, arg2, ret);
7159 }
7160 #endif
7161 break;
7162 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7163 case TARGET_NR_getdents64:
7164 {
7165 struct linux_dirent64 *dirp;
7166 abi_long count = arg3;
7167 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7168 goto efault;
7169 ret = get_errno(sys_getdents64(arg1, dirp, count));
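/* The linux_dirent64 layout is the same on host and target, so the
   records only need to be byte-swapped in place rather than
   repacked. */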
7170 if (!is_error(ret)) {
7171 struct linux_dirent64 *de;
7172 int len = ret;
7173 int reclen;
7174 de = dirp;
7175 while (len > 0) {
7176 reclen = de->d_reclen;
7177 if (reclen > len)
7178 break;
7179 de->d_reclen = tswap16(reclen);
7180 tswap64s((uint64_t *)&de->d_ino);
7181 tswap64s((uint64_t *)&de->d_off);
7182 de = (struct linux_dirent64 *)((char *)de + reclen);
7183 len -= reclen;
7184 }
7185 }
7186 unlock_user(dirp, arg2, ret);
7187 }
7188 break;
7189 #endif /* TARGET_NR_getdents64 */
7190 #if defined(TARGET_NR__newselect)
7191 case TARGET_NR__newselect:
7192 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7193 break;
7194 #endif
7195 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7196 # ifdef TARGET_NR_poll
7197 case TARGET_NR_poll:
7198 # endif
7199 # ifdef TARGET_NR_ppoll
7200 case TARGET_NR_ppoll:
7201 # endif
7202 {
7203 struct target_pollfd *target_pfd;
7204 unsigned int nfds = arg2;
7205 int timeout = arg3;
7206 struct pollfd *pfd;
7207 unsigned int i;
7208
7209 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7210 if (!target_pfd)
7211 goto efault;
7212
7213 pfd = alloca(sizeof(struct pollfd) * nfds);
7214 for(i = 0; i < nfds; i++) {
7215 pfd[i].fd = tswap32(target_pfd[i].fd);
7216 pfd[i].events = tswap16(target_pfd[i].events);
7217 }
7218
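/* For ppoll, arg3 is a struct timespec pointer and arg4 an optional
   sigset pointer; for plain poll, arg3 is simply a timeout in
   milliseconds. */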
7219 # ifdef TARGET_NR_ppoll
7220 if (num == TARGET_NR_ppoll) {
7221 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7222 target_sigset_t *target_set;
7223 sigset_t _set, *set = &_set;
7224
7225 if (arg3) {
7226 if (target_to_host_timespec(timeout_ts, arg3)) {
7227 unlock_user(target_pfd, arg1, 0);
7228 goto efault;
7229 }
7230 } else {
7231 timeout_ts = NULL;
7232 }
7233
7234 if (arg4) {
7235 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7236 if (!target_set) {
7237 unlock_user(target_pfd, arg1, 0);
7238 goto efault;
7239 }
7240 target_to_host_sigset(set, target_set);
7241 } else {
7242 set = NULL;
7243 }
7244
7245 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7246
7247 if (!is_error(ret) && arg3) {
7248 host_to_target_timespec(arg3, timeout_ts);
7249 }
7250 if (arg4) {
7251 unlock_user(target_set, arg4, 0);
7252 }
7253 } else
7254 # endif
7255 ret = get_errno(poll(pfd, nfds, timeout));
7256
7257 if (!is_error(ret)) {
7258 for(i = 0; i < nfds; i++) {
7259 target_pfd[i].revents = tswap16(pfd[i].revents);
7260 }
7261 }
7262 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7263 }
7264 break;
7265 #endif
7266 case TARGET_NR_flock:
7267 /* NOTE: the flock constant seems to be the same for every
7268 Linux platform */
7269 ret = get_errno(flock(arg1, arg2));
7270 break;
7271 case TARGET_NR_readv:
7272 {
7273 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7274 if (vec != NULL) {
7275 ret = get_errno(readv(arg1, vec, arg3));
7276 unlock_iovec(vec, arg2, arg3, 1);
7277 } else {
7278 ret = -host_to_target_errno(errno);
7279 }
7280 }
7281 break;
7282 case TARGET_NR_writev:
7283 {
7284 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7285 if (vec != NULL) {
7286 ret = get_errno(writev(arg1, vec, arg3));
7287 unlock_iovec(vec, arg2, arg3, 0);
7288 } else {
7289 ret = -host_to_target_errno(errno);
7290 }
7291 }
7292 break;
7293 case TARGET_NR_getsid:
7294 ret = get_errno(getsid(arg1));
7295 break;
7296 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7297 case TARGET_NR_fdatasync:
7298 ret = get_errno(fdatasync(arg1));
7299 break;
7300 #endif
7301 case TARGET_NR__sysctl:
7302 /* We don't implement this, but ENOTDIR is always a safe
7303 return value. */
7304 ret = -TARGET_ENOTDIR;
7305 break;
7306 case TARGET_NR_sched_getaffinity:
7307 {
7308 unsigned int mask_size;
7309 unsigned long *mask;
7310
7311 /*
7312 * sched_getaffinity needs multiples of ulong, so need to take
7313 * care of mismatches between target ulong and host ulong sizes.
7314 */
7315 if (arg2 & (sizeof(abi_ulong) - 1)) {
7316 ret = -TARGET_EINVAL;
7317 break;
7318 }
7319 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7320
7321 mask = alloca(mask_size);
7322 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7323
7324 if (!is_error(ret)) {
7325 if (copy_to_user(arg3, mask, ret)) {
7326 goto efault;
7327 }
7328 }
7329 }
7330 break;
7331 case TARGET_NR_sched_setaffinity:
7332 {
7333 unsigned int mask_size;
7334 unsigned long *mask;
7335
7336 /*
7337 * sched_setaffinity needs multiples of ulong, so need to take
7338 * care of mismatches between target ulong and host ulong sizes.
7339 */
7340 if (arg2 & (sizeof(abi_ulong) - 1)) {
7341 ret = -TARGET_EINVAL;
7342 break;
7343 }
7344 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7345
7346 mask = alloca(mask_size);
7347 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7348 goto efault;
7349 }
7350 memcpy(mask, p, arg2);
7351 unlock_user_struct(p, arg3, 0);
7352
7353 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7354 }
7355 break;
7356 case TARGET_NR_sched_setparam:
7357 {
7358 struct sched_param *target_schp;
7359 struct sched_param schp;
7360
7361 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7362 goto efault;
7363 schp.sched_priority = tswap32(target_schp->sched_priority);
7364 unlock_user_struct(target_schp, arg2, 0);
7365 ret = get_errno(sched_setparam(arg1, &schp));
7366 }
7367 break;
7368 case TARGET_NR_sched_getparam:
7369 {
7370 struct sched_param *target_schp;
7371 struct sched_param schp;
7372 ret = get_errno(sched_getparam(arg1, &schp));
7373 if (!is_error(ret)) {
7374 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7375 goto efault;
7376 target_schp->sched_priority = tswap32(schp.sched_priority);
7377 unlock_user_struct(target_schp, arg2, 1);
7378 }
7379 }
7380 break;
7381 case TARGET_NR_sched_setscheduler:
7382 {
7383 struct sched_param *target_schp;
7384 struct sched_param schp;
7385 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7386 goto efault;
7387 schp.sched_priority = tswap32(target_schp->sched_priority);
7388 unlock_user_struct(target_schp, arg3, 0);
7389 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7390 }
7391 break;
7392 case TARGET_NR_sched_getscheduler:
7393 ret = get_errno(sched_getscheduler(arg1));
7394 break;
7395 case TARGET_NR_sched_yield:
7396 ret = get_errno(sched_yield());
7397 break;
7398 case TARGET_NR_sched_get_priority_max:
7399 ret = get_errno(sched_get_priority_max(arg1));
7400 break;
7401 case TARGET_NR_sched_get_priority_min:
7402 ret = get_errno(sched_get_priority_min(arg1));
7403 break;
7404 case TARGET_NR_sched_rr_get_interval:
7405 {
7406 struct timespec ts;
7407 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7408 if (!is_error(ret)) {
7409 host_to_target_timespec(arg2, &ts);
7410 }
7411 }
7412 break;
7413 case TARGET_NR_nanosleep:
7414 {
7415 struct timespec req, rem;
7416 target_to_host_timespec(&req, arg1);
7417 ret = get_errno(nanosleep(&req, &rem));
7418 if (is_error(ret) && arg2) {
7419 host_to_target_timespec(arg2, &rem);
7420 }
7421 }
7422 break;
7423 #ifdef TARGET_NR_query_module
7424 case TARGET_NR_query_module:
7425 goto unimplemented;
7426 #endif
7427 #ifdef TARGET_NR_nfsservctl
7428 case TARGET_NR_nfsservctl:
7429 goto unimplemented;
7430 #endif
7431 case TARGET_NR_prctl:
7432 switch (arg1) {
7433 case PR_GET_PDEATHSIG:
7434 {
7435 int deathsig;
7436 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7437 if (!is_error(ret) && arg2
7438 && put_user_ual(deathsig, arg2)) {
7439 goto efault;
7440 }
7441 break;
7442 }
7443 #ifdef PR_GET_NAME
7444 case PR_GET_NAME:
7445 {
7446 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7447 if (!name) {
7448 goto efault;
7449 }
7450 ret = get_errno(prctl(arg1, (unsigned long)name,
7451 arg3, arg4, arg5));
7452 unlock_user(name, arg2, 16);
7453 break;
7454 }
7455 case PR_SET_NAME:
7456 {
7457 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7458 if (!name) {
7459 goto efault;
7460 }
7461 ret = get_errno(prctl(arg1, (unsigned long)name,
7462 arg3, arg4, arg5));
7463 unlock_user(name, arg2, 0);
7464 break;
7465 }
7466 #endif
7467 default:
7468 /* Most prctl options have no pointer arguments */
7469 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7470 break;
7471 }
7472 break;
7473 #ifdef TARGET_NR_arch_prctl
7474 case TARGET_NR_arch_prctl:
7475 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7476 ret = do_arch_prctl(cpu_env, arg1, arg2);
7477 break;
7478 #else
7479 goto unimplemented;
7480 #endif
7481 #endif
7482 #ifdef TARGET_NR_pread64
7483 case TARGET_NR_pread64:
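/* Some ABIs pass 64-bit syscall arguments in aligned register
   pairs, inserting a pad slot; shift the offset halves over in
   that case. */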
7484 if (regpairs_aligned(cpu_env)) {
7485 arg4 = arg5;
7486 arg5 = arg6;
7487 }
7488 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7489 goto efault;
7490 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7491 unlock_user(p, arg2, ret);
7492 break;
7493 case TARGET_NR_pwrite64:
7494 if (regpairs_aligned(cpu_env)) {
7495 arg4 = arg5;
7496 arg5 = arg6;
7497 }
7498 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7499 goto efault;
7500 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7501 unlock_user(p, arg2, 0);
7502 break;
7503 #endif
7504 case TARGET_NR_getcwd:
7505 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7506 goto efault;
7507 ret = get_errno(sys_getcwd1(p, arg2));
7508 unlock_user(p, arg1, ret);
7509 break;
7510 case TARGET_NR_capget:
7511 goto unimplemented;
7512 case TARGET_NR_capset:
7513 goto unimplemented;
7514 case TARGET_NR_sigaltstack:
7515 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7516 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7517 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7518 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7519 break;
7520 #else
7521 goto unimplemented;
7522 #endif
7523 case TARGET_NR_sendfile:
7524 goto unimplemented;
7525 #ifdef TARGET_NR_getpmsg
7526 case TARGET_NR_getpmsg:
7527 goto unimplemented;
7528 #endif
7529 #ifdef TARGET_NR_putpmsg
7530 case TARGET_NR_putpmsg:
7531 goto unimplemented;
7532 #endif
7533 #ifdef TARGET_NR_vfork
7534 case TARGET_NR_vfork:
7535 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7536 0, 0, 0, 0));
7537 break;
7538 #endif
7539 #ifdef TARGET_NR_ugetrlimit
7540 case TARGET_NR_ugetrlimit:
7541 {
7542 struct rlimit rlim;
7543 int resource = target_to_host_resource(arg1);
7544 ret = get_errno(getrlimit(resource, &rlim));
7545 if (!is_error(ret)) {
7546 struct target_rlimit *target_rlim;
7547 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7548 goto efault;
7549 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7550 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7551 unlock_user_struct(target_rlim, arg2, 1);
7552 }
7553 break;
7554 }
7555 #endif
7556 #ifdef TARGET_NR_truncate64
7557 case TARGET_NR_truncate64:
7558 if (!(p = lock_user_string(arg1)))
7559 goto efault;
7560 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7561 unlock_user(p, arg1, 0);
7562 break;
7563 #endif
7564 #ifdef TARGET_NR_ftruncate64
7565 case TARGET_NR_ftruncate64:
7566 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7567 break;
7568 #endif
7569 #ifdef TARGET_NR_stat64
7570 case TARGET_NR_stat64:
7571 if (!(p = lock_user_string(arg1)))
7572 goto efault;
7573 ret = get_errno(stat(path(p), &st));
7574 unlock_user(p, arg1, 0);
7575 if (!is_error(ret))
7576 ret = host_to_target_stat64(cpu_env, arg2, &st);
7577 break;
7578 #endif
7579 #ifdef TARGET_NR_lstat64
7580 case TARGET_NR_lstat64:
7581 if (!(p = lock_user_string(arg1)))
7582 goto efault;
7583 ret = get_errno(lstat(path(p), &st));
7584 unlock_user(p, arg1, 0);
7585 if (!is_error(ret))
7586 ret = host_to_target_stat64(cpu_env, arg2, &st);
7587 break;
7588 #endif
7589 #ifdef TARGET_NR_fstat64
7590 case TARGET_NR_fstat64:
7591 ret = get_errno(fstat(arg1, &st));
7592 if (!is_error(ret))
7593 ret = host_to_target_stat64(cpu_env, arg2, &st);
7594 break;
7595 #endif
7596 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7597 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7598 #ifdef TARGET_NR_fstatat64
7599 case TARGET_NR_fstatat64:
7600 #endif
7601 #ifdef TARGET_NR_newfstatat
7602 case TARGET_NR_newfstatat:
7603 #endif
7604 if (!(p = lock_user_string(arg2)))
7605 goto efault;
7606 #ifdef __NR_fstatat64
7607 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7608 #else
7609 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7610 #endif
7611 if (!is_error(ret))
7612 ret = host_to_target_stat64(cpu_env, arg3, &st);
7613 break;
7614 #endif
7615 case TARGET_NR_lchown:
7616 if (!(p = lock_user_string(arg1)))
7617 goto efault;
7618 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7619 unlock_user(p, arg1, 0);
7620 break;
7621 #ifdef TARGET_NR_getuid
7622 case TARGET_NR_getuid:
7623 ret = get_errno(high2lowuid(getuid()));
7624 break;
7625 #endif
7626 #ifdef TARGET_NR_getgid
7627 case TARGET_NR_getgid:
7628 ret = get_errno(high2lowgid(getgid()));
7629 break;
7630 #endif
7631 #ifdef TARGET_NR_geteuid
7632 case TARGET_NR_geteuid:
7633 ret = get_errno(high2lowuid(geteuid()));
7634 break;
7635 #endif
7636 #ifdef TARGET_NR_getegid
7637 case TARGET_NR_getegid:
7638 ret = get_errno(high2lowgid(getegid()));
7639 break;
7640 #endif
7641 case TARGET_NR_setreuid:
7642 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7643 break;
7644 case TARGET_NR_setregid:
7645 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7646 break;
7647 case TARGET_NR_getgroups:
7648 {
7649 int gidsetsize = arg1;
7650 target_id *target_grouplist;
7651 gid_t *grouplist;
7652 int i;
7653
7654 grouplist = alloca(gidsetsize * sizeof(gid_t));
7655 ret = get_errno(getgroups(gidsetsize, grouplist));
7656 if (gidsetsize == 0)
7657 break;
7658 if (!is_error(ret)) {
7659 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7660 if (!target_grouplist)
7661 goto efault;
7662 for(i = 0;i < ret; i++)
7663 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7664 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7665 }
7666 }
7667 break;
7668 case TARGET_NR_setgroups:
7669 {
7670 int gidsetsize = arg1;
7671 target_id *target_grouplist;
7672 gid_t *grouplist;
7673 int i;
7674
7675 grouplist = alloca(gidsetsize * sizeof(gid_t));
7676 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7677 if (!target_grouplist) {
7678 ret = -TARGET_EFAULT;
7679 goto fail;
7680 }
7681 for(i = 0;i < gidsetsize; i++)
7682 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7683 unlock_user(target_grouplist, arg2, 0);
7684 ret = get_errno(setgroups(gidsetsize, grouplist));
7685 }
7686 break;
7687 case TARGET_NR_fchown:
7688 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7689 break;
7690 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7691 case TARGET_NR_fchownat:
7692 if (!(p = lock_user_string(arg2)))
7693 goto efault;
7694 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7695 unlock_user(p, arg2, 0);
7696 break;
7697 #endif
7698 #ifdef TARGET_NR_setresuid
7699 case TARGET_NR_setresuid:
7700 ret = get_errno(setresuid(low2highuid(arg1),
7701 low2highuid(arg2),
7702 low2highuid(arg3)));
7703 break;
7704 #endif
7705 #ifdef TARGET_NR_getresuid
7706 case TARGET_NR_getresuid:
7707 {
7708 uid_t ruid, euid, suid;
7709 ret = get_errno(getresuid(&ruid, &euid, &suid));
7710 if (!is_error(ret)) {
7711 if (put_user_u16(high2lowuid(ruid), arg1)
7712 || put_user_u16(high2lowuid(euid), arg2)
7713 || put_user_u16(high2lowuid(suid), arg3))
7714 goto efault;
7715 }
7716 }
7717 break;
7718 #endif
7719 #ifdef TARGET_NR_setresgid
7720 case TARGET_NR_setresgid:
7721 ret = get_errno(setresgid(low2highgid(arg1),
7722 low2highgid(arg2),
7723 low2highgid(arg3)));
7724 break;
7725 #endif
7726 #ifdef TARGET_NR_getresgid
7727 case TARGET_NR_getresgid:
7728 {
7729 gid_t rgid, egid, sgid;
7730 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7731 if (!is_error(ret)) {
7732 if (put_user_u16(high2lowgid(rgid), arg1)
7733 || put_user_u16(high2lowgid(egid), arg2)
7734 || put_user_u16(high2lowgid(sgid), arg3))
7735 goto efault;
7736 }
7737 }
7738 break;
7739 #endif
7740 case TARGET_NR_chown:
7741 if (!(p = lock_user_string(arg1)))
7742 goto efault;
7743 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7744 unlock_user(p, arg1, 0);
7745 break;
7746 case TARGET_NR_setuid:
7747 ret = get_errno(setuid(low2highuid(arg1)));
7748 break;
7749 case TARGET_NR_setgid:
7750 ret = get_errno(setgid(low2highgid(arg1)));
7751 break;
7752 case TARGET_NR_setfsuid:
7753 ret = get_errno(setfsuid(arg1));
7754 break;
7755 case TARGET_NR_setfsgid:
7756 ret = get_errno(setfsgid(arg1));
7757 break;
7758
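    /* The *32 syscall variants below take full 32-bit uids/gids, so the
       values are passed straight through without the 16-bit helpers. */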
7759 #ifdef TARGET_NR_lchown32
7760 case TARGET_NR_lchown32:
7761 if (!(p = lock_user_string(arg1)))
7762 goto efault;
7763 ret = get_errno(lchown(p, arg2, arg3));
7764 unlock_user(p, arg1, 0);
7765 break;
7766 #endif
7767 #ifdef TARGET_NR_getuid32
7768 case TARGET_NR_getuid32:
7769 ret = get_errno(getuid());
7770 break;
7771 #endif
7772
7773 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7774 /* Alpha specific */
7775 case TARGET_NR_getxuid:
7776 {
7777 uid_t euid;
7778 euid=geteuid();
7779 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7780 }
7781 ret = get_errno(getuid());
7782 break;
7783 #endif
7784 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7785 /* Alpha specific */
7786 case TARGET_NR_getxgid:
7787 {
7791 7788            gid_t egid;
7789 egid=getegid();
7790 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7791 }
7792 ret = get_errno(getgid());
7793 break;
7794 #endif
7795 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7796 /* Alpha specific */
7797 case TARGET_NR_osf_getsysinfo:
7798 ret = -TARGET_EOPNOTSUPP;
7799 switch (arg1) {
7800 case TARGET_GSI_IEEE_FP_CONTROL:
7801 {
7802 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7803
7804 /* Copied from linux ieee_fpcr_to_swcr. */
7805 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7806 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7807 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7808 | SWCR_TRAP_ENABLE_DZE
7809 | SWCR_TRAP_ENABLE_OVF);
7810 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7811 | SWCR_TRAP_ENABLE_INE);
7812 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7813 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7814
7815 if (put_user_u64 (swcr, arg2))
7816 goto efault;
7817 ret = 0;
7818 }
7819 break;
7820
7821 /* case GSI_IEEE_STATE_AT_SIGNAL:
7822 -- Not implemented in linux kernel.
7823 case GSI_UACPROC:
7824 -- Retrieves current unaligned access state; not much used.
7825 case GSI_PROC_TYPE:
7826 -- Retrieves implver information; surely not used.
7827 case GSI_GET_HWRPB:
7828 -- Grabs a copy of the HWRPB; surely not used.
7829 */
7830 }
7831 break;
7832 #endif
7833 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7834 /* Alpha specific */
7835 case TARGET_NR_osf_setsysinfo:
7836 ret = -TARGET_EOPNOTSUPP;
7837 switch (arg1) {
7838 case TARGET_SSI_IEEE_FP_CONTROL:
7839 {
7840 uint64_t swcr, fpcr, orig_fpcr;
7841
7842 if (get_user_u64 (swcr, arg2)) {
7843 goto efault;
7844 }
7845 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7846 fpcr = orig_fpcr & FPCR_DYN_MASK;
7847
7848 /* Copied from linux ieee_swcr_to_fpcr. */
7849 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7850 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7851 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7852 | SWCR_TRAP_ENABLE_DZE
7853 | SWCR_TRAP_ENABLE_OVF)) << 48;
7854 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7855 | SWCR_TRAP_ENABLE_INE)) << 57;
7856 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7857 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7858
7859 cpu_alpha_store_fpcr(cpu_env, fpcr);
7860 ret = 0;
7861 }
7862 break;
7863
7864 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
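        /* Fold the requested exception status bits into the FPCR; if any
           newly raised exception is unmasked, deliver a SIGFPE to the guest
           with the matching si_code. */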
7865 {
7866 uint64_t exc, fpcr, orig_fpcr;
7867 int si_code;
7868
7869 if (get_user_u64(exc, arg2)) {
7870 goto efault;
7871 }
7872
7873 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7874
7875 /* We only add to the exception status here. */
7876 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7877
7878 cpu_alpha_store_fpcr(cpu_env, fpcr);
7879 ret = 0;
7880
7881 /* Old exceptions are not signaled. */
7882 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7883
7884                 /* If any exceptions set by this call are
7885                    unmasked, send a signal. */
7886 si_code = 0;
7887 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7888 si_code = TARGET_FPE_FLTRES;
7889 }
7890 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7891 si_code = TARGET_FPE_FLTUND;
7892 }
7893 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7894 si_code = TARGET_FPE_FLTOVF;
7895 }
7896 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7897 si_code = TARGET_FPE_FLTDIV;
7898 }
7899 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7900 si_code = TARGET_FPE_FLTINV;
7901 }
7902 if (si_code != 0) {
7903 target_siginfo_t info;
7904 info.si_signo = SIGFPE;
7905 info.si_errno = 0;
7906 info.si_code = si_code;
7907 info._sifields._sigfault._addr
7908 = ((CPUArchState *)cpu_env)->pc;
7909 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7910 }
7911 }
7912 break;
7913
7914 /* case SSI_NVPAIRS:
7915 -- Used with SSIN_UACPROC to enable unaligned accesses.
7916 case SSI_IEEE_STATE_AT_SIGNAL:
7917 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7918 -- Not implemented in linux kernel
7919 */
7920 }
7921 break;
7922 #endif
7923 #ifdef TARGET_NR_osf_sigprocmask
7924 /* Alpha specific. */
7925 case TARGET_NR_osf_sigprocmask:
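      /* Unlike POSIX sigprocmask(), the OSF/1 flavour returns the previous
         signal mask as the syscall result rather than through a pointer. */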
7926 {
7927 abi_ulong mask;
7928 int how;
7929 sigset_t set, oldset;
7930
7931 switch(arg1) {
7932 case TARGET_SIG_BLOCK:
7933 how = SIG_BLOCK;
7934 break;
7935 case TARGET_SIG_UNBLOCK:
7936 how = SIG_UNBLOCK;
7937 break;
7938 case TARGET_SIG_SETMASK:
7939 how = SIG_SETMASK;
7940 break;
7941 default:
7942 ret = -TARGET_EINVAL;
7943 goto fail;
7944 }
7945 mask = arg2;
7946 target_to_host_old_sigset(&set, &mask);
7947 sigprocmask(how, &set, &oldset);
7948 host_to_target_old_sigset(&mask, &oldset);
7949 ret = mask;
7950 }
7951 break;
7952 #endif
7953
7954 #ifdef TARGET_NR_getgid32
7955 case TARGET_NR_getgid32:
7956 ret = get_errno(getgid());
7957 break;
7958 #endif
7959 #ifdef TARGET_NR_geteuid32
7960 case TARGET_NR_geteuid32:
7961 ret = get_errno(geteuid());
7962 break;
7963 #endif
7964 #ifdef TARGET_NR_getegid32
7965 case TARGET_NR_getegid32:
7966 ret = get_errno(getegid());
7967 break;
7968 #endif
7969 #ifdef TARGET_NR_setreuid32
7970 case TARGET_NR_setreuid32:
7971 ret = get_errno(setreuid(arg1, arg2));
7972 break;
7973 #endif
7974 #ifdef TARGET_NR_setregid32
7975 case TARGET_NR_setregid32:
7976 ret = get_errno(setregid(arg1, arg2));
7977 break;
7978 #endif
7979 #ifdef TARGET_NR_getgroups32
7980 case TARGET_NR_getgroups32:
7981 {
7982 int gidsetsize = arg1;
7983 uint32_t *target_grouplist;
7984 gid_t *grouplist;
7985 int i;
7986
7987 grouplist = alloca(gidsetsize * sizeof(gid_t));
7988 ret = get_errno(getgroups(gidsetsize, grouplist));
7989 if (gidsetsize == 0)
7990 break;
7991 if (!is_error(ret)) {
7992 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7993 if (!target_grouplist) {
7994 ret = -TARGET_EFAULT;
7995 goto fail;
7996 }
7997 for(i = 0;i < ret; i++)
7998 target_grouplist[i] = tswap32(grouplist[i]);
7999 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8000 }
8001 }
8002 break;
8003 #endif
8004 #ifdef TARGET_NR_setgroups32
8005 case TARGET_NR_setgroups32:
8006 {
8007 int gidsetsize = arg1;
8008 uint32_t *target_grouplist;
8009 gid_t *grouplist;
8010 int i;
8011
8012 grouplist = alloca(gidsetsize * sizeof(gid_t));
8013 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8014 if (!target_grouplist) {
8015 ret = -TARGET_EFAULT;
8016 goto fail;
8017 }
8018 for(i = 0;i < gidsetsize; i++)
8019 grouplist[i] = tswap32(target_grouplist[i]);
8020 unlock_user(target_grouplist, arg2, 0);
8021 ret = get_errno(setgroups(gidsetsize, grouplist));
8022 }
8023 break;
8024 #endif
8025 #ifdef TARGET_NR_fchown32
8026 case TARGET_NR_fchown32:
8027 ret = get_errno(fchown(arg1, arg2, arg3));
8028 break;
8029 #endif
8030 #ifdef TARGET_NR_setresuid32
8031 case TARGET_NR_setresuid32:
8032 ret = get_errno(setresuid(arg1, arg2, arg3));
8033 break;
8034 #endif
8035 #ifdef TARGET_NR_getresuid32
8036 case TARGET_NR_getresuid32:
8037 {
8038 uid_t ruid, euid, suid;
8039 ret = get_errno(getresuid(&ruid, &euid, &suid));
8040 if (!is_error(ret)) {
8041 if (put_user_u32(ruid, arg1)
8042 || put_user_u32(euid, arg2)
8043 || put_user_u32(suid, arg3))
8044 goto efault;
8045 }
8046 }
8047 break;
8048 #endif
8049 #ifdef TARGET_NR_setresgid32
8050 case TARGET_NR_setresgid32:
8051 ret = get_errno(setresgid(arg1, arg2, arg3));
8052 break;
8053 #endif
8054 #ifdef TARGET_NR_getresgid32
8055 case TARGET_NR_getresgid32:
8056 {
8057 gid_t rgid, egid, sgid;
8058 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8059 if (!is_error(ret)) {
8060 if (put_user_u32(rgid, arg1)
8061 || put_user_u32(egid, arg2)
8062 || put_user_u32(sgid, arg3))
8063 goto efault;
8064 }
8065 }
8066 break;
8067 #endif
8068 #ifdef TARGET_NR_chown32
8069 case TARGET_NR_chown32:
8070 if (!(p = lock_user_string(arg1)))
8071 goto efault;
8072 ret = get_errno(chown(p, arg2, arg3));
8073 unlock_user(p, arg1, 0);
8074 break;
8075 #endif
8076 #ifdef TARGET_NR_setuid32
8077 case TARGET_NR_setuid32:
8078 ret = get_errno(setuid(arg1));
8079 break;
8080 #endif
8081 #ifdef TARGET_NR_setgid32
8082 case TARGET_NR_setgid32:
8083 ret = get_errno(setgid(arg1));
8084 break;
8085 #endif
8086 #ifdef TARGET_NR_setfsuid32
8087 case TARGET_NR_setfsuid32:
8088 ret = get_errno(setfsuid(arg1));
8089 break;
8090 #endif
8091 #ifdef TARGET_NR_setfsgid32
8092 case TARGET_NR_setfsgid32:
8093 ret = get_errno(setfsgid(arg1));
8094 break;
8095 #endif
8096
8097 case TARGET_NR_pivot_root:
8098 goto unimplemented;
8099 #ifdef TARGET_NR_mincore
8100 case TARGET_NR_mincore:
8101 {
8102 void *a;
8103 ret = -TARGET_EFAULT;
8104 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8105 goto efault;
8106 if (!(p = lock_user_string(arg3)))
8107 goto mincore_fail;
8108 ret = get_errno(mincore(a, arg2, p));
8109 unlock_user(p, arg3, ret);
8110 mincore_fail:
8111 unlock_user(a, arg1, 0);
8112 }
8113 break;
8114 #endif
8115 #ifdef TARGET_NR_arm_fadvise64_64
8116 case TARGET_NR_arm_fadvise64_64:
8117 {
8118 /*
8119 * arm_fadvise64_64 looks like fadvise64_64 but
8120 * with different argument order
8121 */
8122 abi_long temp;
8123 temp = arg3;
8124 arg3 = arg4;
8125 arg4 = temp;
8126 }
8127 #endif
8128 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8129 #ifdef TARGET_NR_fadvise64_64
8130 case TARGET_NR_fadvise64_64:
8131 #endif
8132 #ifdef TARGET_NR_fadvise64
8133 case TARGET_NR_fadvise64:
8134 #endif
8135 #ifdef TARGET_S390X
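      /* s390x guests number the POSIX_FADV_* advice values differently:
         6/7 mean DONTNEED/NOREUSE, while 4/5 are unused, so remap them to
         the host constants (and turn 4/5 into deliberately invalid values). */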
8136 switch (arg4) {
8137 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8138 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8139 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8140 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8141 default: break;
8142 }
8143 #endif
8144         ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
8145 break;
8146 #endif
8147 #ifdef TARGET_NR_madvise
8148 case TARGET_NR_madvise:
8149 /* A straight passthrough may not be safe because qemu sometimes
8150 turns private flie-backed mappings into anonymous mappings.
8151 This will break MADV_DONTNEED.
8152 This is a hint, so ignoring and returning success is ok. */
8153 ret = get_errno(0);
8154 break;
8155 #endif
8156 #if TARGET_ABI_BITS == 32
8157 case TARGET_NR_fcntl64:
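    /* For F_GETLK64/F_SETLK64/F_SETLKW64 the struct flock64 is converted
       between the target layout (ARM EABI uses its own padded variant) and
       the host layout; all other commands are handed off to do_fcntl(). */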
8158 {
8159 int cmd;
8160 struct flock64 fl;
8161 struct target_flock64 *target_fl;
8162 #ifdef TARGET_ARM
8163 struct target_eabi_flock64 *target_efl;
8164 #endif
8165
8166 cmd = target_to_host_fcntl_cmd(arg2);
8167 if (cmd == -TARGET_EINVAL) {
8168 ret = cmd;
8169 break;
8170 }
8171
8172 switch(arg2) {
8173 case TARGET_F_GETLK64:
8174 #ifdef TARGET_ARM
8175 if (((CPUARMState *)cpu_env)->eabi) {
8176 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8177 goto efault;
8178 fl.l_type = tswap16(target_efl->l_type);
8179 fl.l_whence = tswap16(target_efl->l_whence);
8180 fl.l_start = tswap64(target_efl->l_start);
8181 fl.l_len = tswap64(target_efl->l_len);
8182 fl.l_pid = tswap32(target_efl->l_pid);
8183 unlock_user_struct(target_efl, arg3, 0);
8184 } else
8185 #endif
8186 {
8187 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8188 goto efault;
8189 fl.l_type = tswap16(target_fl->l_type);
8190 fl.l_whence = tswap16(target_fl->l_whence);
8191 fl.l_start = tswap64(target_fl->l_start);
8192 fl.l_len = tswap64(target_fl->l_len);
8193 fl.l_pid = tswap32(target_fl->l_pid);
8194 unlock_user_struct(target_fl, arg3, 0);
8195 }
8196 ret = get_errno(fcntl(arg1, cmd, &fl));
8197 if (ret == 0) {
8198 #ifdef TARGET_ARM
8199 if (((CPUARMState *)cpu_env)->eabi) {
8200 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8201 goto efault;
8202 target_efl->l_type = tswap16(fl.l_type);
8203 target_efl->l_whence = tswap16(fl.l_whence);
8204 target_efl->l_start = tswap64(fl.l_start);
8205 target_efl->l_len = tswap64(fl.l_len);
8206 target_efl->l_pid = tswap32(fl.l_pid);
8207 unlock_user_struct(target_efl, arg3, 1);
8208 } else
8209 #endif
8210 {
8211 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8212 goto efault;
8213 target_fl->l_type = tswap16(fl.l_type);
8214 target_fl->l_whence = tswap16(fl.l_whence);
8215 target_fl->l_start = tswap64(fl.l_start);
8216 target_fl->l_len = tswap64(fl.l_len);
8217 target_fl->l_pid = tswap32(fl.l_pid);
8218 unlock_user_struct(target_fl, arg3, 1);
8219 }
8220 }
8221 break;
8222
8223 case TARGET_F_SETLK64:
8224 case TARGET_F_SETLKW64:
8225 #ifdef TARGET_ARM
8226 if (((CPUARMState *)cpu_env)->eabi) {
8227 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8228 goto efault;
8229 fl.l_type = tswap16(target_efl->l_type);
8230 fl.l_whence = tswap16(target_efl->l_whence);
8231 fl.l_start = tswap64(target_efl->l_start);
8232 fl.l_len = tswap64(target_efl->l_len);
8233 fl.l_pid = tswap32(target_efl->l_pid);
8234 unlock_user_struct(target_efl, arg3, 0);
8235 } else
8236 #endif
8237 {
8238 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8239 goto efault;
8240 fl.l_type = tswap16(target_fl->l_type);
8241 fl.l_whence = tswap16(target_fl->l_whence);
8242 fl.l_start = tswap64(target_fl->l_start);
8243 fl.l_len = tswap64(target_fl->l_len);
8244 fl.l_pid = tswap32(target_fl->l_pid);
8245 unlock_user_struct(target_fl, arg3, 0);
8246 }
8247 ret = get_errno(fcntl(arg1, cmd, &fl));
8248 break;
8249 default:
8250 ret = do_fcntl(arg1, arg2, arg3);
8251 break;
8252 }
8253 break;
8254 }
8255 #endif
8256 #ifdef TARGET_NR_cacheflush
8257 case TARGET_NR_cacheflush:
8258 /* self-modifying code is handled automatically, so nothing needed */
8259 ret = 0;
8260 break;
8261 #endif
8262 #ifdef TARGET_NR_security
8263 case TARGET_NR_security:
8264 goto unimplemented;
8265 #endif
8266 #ifdef TARGET_NR_getpagesize
8267 case TARGET_NR_getpagesize:
8268 ret = TARGET_PAGE_SIZE;
8269 break;
8270 #endif
8271 case TARGET_NR_gettid:
8272 ret = get_errno(gettid());
8273 break;
8274 #ifdef TARGET_NR_readahead
8275 case TARGET_NR_readahead:
8276 #if TARGET_ABI_BITS == 32
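    /* On 32-bit ABIs the 64-bit offset is split across two registers; ABIs
       that require an aligned register pair shift the arguments up by one. */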
8277 if (regpairs_aligned(cpu_env)) {
8278 arg2 = arg3;
8279 arg3 = arg4;
8280 arg4 = arg5;
8281 }
8282 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8283 #else
8284 ret = get_errno(readahead(arg1, arg2, arg3));
8285 #endif
8286 break;
8287 #endif
8288 #ifdef CONFIG_ATTR
8289 #ifdef TARGET_NR_setxattr
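    /* The extended-attribute syscalls map almost directly onto the host
       calls; only the name/value buffers have to be locked in guest memory
       around each call. */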
8290 case TARGET_NR_listxattr:
8291 case TARGET_NR_llistxattr:
8292 {
8293 void *p, *b = 0;
8294 if (arg2) {
8295 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8296 if (!b) {
8297 ret = -TARGET_EFAULT;
8298 break;
8299 }
8300 }
8301 p = lock_user_string(arg1);
8302 if (p) {
8303 if (num == TARGET_NR_listxattr) {
8304 ret = get_errno(listxattr(p, b, arg3));
8305 } else {
8306 ret = get_errno(llistxattr(p, b, arg3));
8307 }
8308 } else {
8309 ret = -TARGET_EFAULT;
8310 }
8311 unlock_user(p, arg1, 0);
8312 unlock_user(b, arg2, arg3);
8313 break;
8314 }
8315 case TARGET_NR_flistxattr:
8316 {
8317 void *b = 0;
8318 if (arg2) {
8319 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8320 if (!b) {
8321 ret = -TARGET_EFAULT;
8322 break;
8323 }
8324 }
8325 ret = get_errno(flistxattr(arg1, b, arg3));
8326 unlock_user(b, arg2, arg3);
8327 break;
8328 }
8329 case TARGET_NR_setxattr:
8330 case TARGET_NR_lsetxattr:
8331 {
8332 void *p, *n, *v = 0;
8333 if (arg3) {
8334 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8335 if (!v) {
8336 ret = -TARGET_EFAULT;
8337 break;
8338 }
8339 }
8340 p = lock_user_string(arg1);
8341 n = lock_user_string(arg2);
8342 if (p && n) {
8343 if (num == TARGET_NR_setxattr) {
8344 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8345 } else {
8346 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8347 }
8348 } else {
8349 ret = -TARGET_EFAULT;
8350 }
8351 unlock_user(p, arg1, 0);
8352 unlock_user(n, arg2, 0);
8353 unlock_user(v, arg3, 0);
8354 }
8355 break;
8356 case TARGET_NR_fsetxattr:
8357 {
8358 void *n, *v = 0;
8359 if (arg3) {
8360 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8361 if (!v) {
8362 ret = -TARGET_EFAULT;
8363 break;
8364 }
8365 }
8366 n = lock_user_string(arg2);
8367 if (n) {
8368 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8369 } else {
8370 ret = -TARGET_EFAULT;
8371 }
8372 unlock_user(n, arg2, 0);
8373 unlock_user(v, arg3, 0);
8374 }
8375 break;
8376 case TARGET_NR_getxattr:
8377 case TARGET_NR_lgetxattr:
8378 {
8379 void *p, *n, *v = 0;
8380 if (arg3) {
8381 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8382 if (!v) {
8383 ret = -TARGET_EFAULT;
8384 break;
8385 }
8386 }
8387 p = lock_user_string(arg1);
8388 n = lock_user_string(arg2);
8389 if (p && n) {
8390 if (num == TARGET_NR_getxattr) {
8391 ret = get_errno(getxattr(p, n, v, arg4));
8392 } else {
8393 ret = get_errno(lgetxattr(p, n, v, arg4));
8394 }
8395 } else {
8396 ret = -TARGET_EFAULT;
8397 }
8398 unlock_user(p, arg1, 0);
8399 unlock_user(n, arg2, 0);
8400 unlock_user(v, arg3, arg4);
8401 }
8402 break;
8403 case TARGET_NR_fgetxattr:
8404 {
8405 void *n, *v = 0;
8406 if (arg3) {
8407 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8408 if (!v) {
8409 ret = -TARGET_EFAULT;
8410 break;
8411 }
8412 }
8413 n = lock_user_string(arg2);
8414 if (n) {
8415 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8416 } else {
8417 ret = -TARGET_EFAULT;
8418 }
8419 unlock_user(n, arg2, 0);
8420 unlock_user(v, arg3, arg4);
8421 }
8422 break;
8423 case TARGET_NR_removexattr:
8424 case TARGET_NR_lremovexattr:
8425 {
8426 void *p, *n;
8427 p = lock_user_string(arg1);
8428 n = lock_user_string(arg2);
8429 if (p && n) {
8430 if (num == TARGET_NR_removexattr) {
8431 ret = get_errno(removexattr(p, n));
8432 } else {
8433 ret = get_errno(lremovexattr(p, n));
8434 }
8435 } else {
8436 ret = -TARGET_EFAULT;
8437 }
8438 unlock_user(p, arg1, 0);
8439 unlock_user(n, arg2, 0);
8440 }
8441 break;
8442 case TARGET_NR_fremovexattr:
8443 {
8444 void *n;
8445 n = lock_user_string(arg2);
8446 if (n) {
8447 ret = get_errno(fremovexattr(arg1, n));
8448 } else {
8449 ret = -TARGET_EFAULT;
8450 }
8451 unlock_user(n, arg2, 0);
8452 }
8453 break;
8454 #endif
8455 #endif /* CONFIG_ATTR */
8456 #ifdef TARGET_NR_set_thread_area
8457 case TARGET_NR_set_thread_area:
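    /* TLS setup is per-architecture: MIPS and CRIS store the pointer
       directly in the CPU state, while 32-bit x86 installs a GDT entry via
       do_set_thread_area(). */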
8458 #if defined(TARGET_MIPS)
8459 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8460 ret = 0;
8461 break;
8462 #elif defined(TARGET_CRIS)
8463 if (arg1 & 0xff)
8464 ret = -TARGET_EINVAL;
8465 else {
8466 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8467 ret = 0;
8468 }
8469 break;
8470 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8471 ret = do_set_thread_area(cpu_env, arg1);
8472 break;
8473 #else
8474 goto unimplemented_nowarn;
8475 #endif
8476 #endif
8477 #ifdef TARGET_NR_get_thread_area
8478 case TARGET_NR_get_thread_area:
8479 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8480 ret = do_get_thread_area(cpu_env, arg1);
8481 #else
8482 goto unimplemented_nowarn;
8483 #endif
8484 #endif
8485 #ifdef TARGET_NR_getdomainname
8486 case TARGET_NR_getdomainname:
8487 goto unimplemented_nowarn;
8488 #endif
8489
8490 #ifdef TARGET_NR_clock_gettime
8491 case TARGET_NR_clock_gettime:
8492 {
8493 struct timespec ts;
8494 ret = get_errno(clock_gettime(arg1, &ts));
8495 if (!is_error(ret)) {
8496 host_to_target_timespec(arg2, &ts);
8497 }
8498 break;
8499 }
8500 #endif
8501 #ifdef TARGET_NR_clock_getres
8502 case TARGET_NR_clock_getres:
8503 {
8504 struct timespec ts;
8505 ret = get_errno(clock_getres(arg1, &ts));
8506 if (!is_error(ret)) {
8507 host_to_target_timespec(arg2, &ts);
8508 }
8509 break;
8510 }
8511 #endif
8512 #ifdef TARGET_NR_clock_nanosleep
8513 case TARGET_NR_clock_nanosleep:
8514 {
8515 struct timespec ts;
8516 target_to_host_timespec(&ts, arg3);
8517 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8518 if (arg4)
8519 host_to_target_timespec(arg4, &ts);
8520 break;
8521 }
8522 #endif
8523
8524 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8525 case TARGET_NR_set_tid_address:
8526 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8527 break;
8528 #endif
8529
8530 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8531 case TARGET_NR_tkill:
8532 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8533 break;
8534 #endif
8535
8536 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8537 case TARGET_NR_tgkill:
8538 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8539 target_to_host_signal(arg3)));
8540 break;
8541 #endif
8542
8543 #ifdef TARGET_NR_set_robust_list
8544 case TARGET_NR_set_robust_list:
8545 goto unimplemented_nowarn;
8546 #endif
8547
8548 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8549 case TARGET_NR_utimensat:
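    /* As with the host utimensat(), a null times pointer (arg3 == 0) means
       "set both timestamps to the current time"; otherwise the two target
       timespecs are converted before the call. */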
8550 {
8551 struct timespec *tsp, ts[2];
8552 if (!arg3) {
8553 tsp = NULL;
8554 } else {
8555 target_to_host_timespec(ts, arg3);
8556 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8557 tsp = ts;
8558 }
8559 if (!arg2)
8560 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8561 else {
8562 if (!(p = lock_user_string(arg2))) {
8563 ret = -TARGET_EFAULT;
8564 goto fail;
8565 }
8566 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8567 unlock_user(p, arg2, 0);
8568 }
8569 }
8570 break;
8571 #endif
8572 #if defined(CONFIG_USE_NPTL)
8573 case TARGET_NR_futex:
8574 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8575 break;
8576 #endif
8577 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8578 case TARGET_NR_inotify_init:
8579 ret = get_errno(sys_inotify_init());
8580 break;
8581 #endif
8582 #ifdef CONFIG_INOTIFY1
8583 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8584 case TARGET_NR_inotify_init1:
8585 ret = get_errno(sys_inotify_init1(arg1));
8586 break;
8587 #endif
8588 #endif
8589 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8590 case TARGET_NR_inotify_add_watch:
8591 p = lock_user_string(arg2);
8592 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8593 unlock_user(p, arg2, 0);
8594 break;
8595 #endif
8596 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8597 case TARGET_NR_inotify_rm_watch:
8598 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8599 break;
8600 #endif
8601
8602 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8603 case TARGET_NR_mq_open:
8604 {
8605 struct mq_attr posix_mq_attr;
8606
8607 p = lock_user_string(arg1 - 1);
8608 if (arg4 != 0)
8609 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8610             ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL));
8611 unlock_user (p, arg1, 0);
8612 }
8613 break;
8614
8615 case TARGET_NR_mq_unlink:
8616 p = lock_user_string(arg1 - 1);
8617 ret = get_errno(mq_unlink(p));
8618 unlock_user (p, arg1, 0);
8619 break;
8620
8621 case TARGET_NR_mq_timedsend:
8622 {
8623 struct timespec ts;
8624
8625 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8626 if (arg5 != 0) {
8627 target_to_host_timespec(&ts, arg5);
8628 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8629 host_to_target_timespec(arg5, &ts);
8630 }
8631 else
8632 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8633 unlock_user (p, arg2, arg3);
8634 }
8635 break;
8636
8637 case TARGET_NR_mq_timedreceive:
8638 {
8639 struct timespec ts;
8640 unsigned int prio;
8641
8642             p = lock_user (VERIFY_WRITE, arg2, arg3, 1);
8643 if (arg5 != 0) {
8644 target_to_host_timespec(&ts, arg5);
8645 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8646 host_to_target_timespec(arg5, &ts);
8647 }
8648 else
8649 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8650 unlock_user (p, arg2, arg3);
8651 if (arg4 != 0)
8652 put_user_u32(prio, arg4);
8653 }
8654 break;
8655
8656 /* Not implemented for now... */
8657 /* case TARGET_NR_mq_notify: */
8658 /* break; */
8659
8660 case TARGET_NR_mq_getsetattr:
8661 {
8662 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8663 ret = 0;
8664 if (arg3 != 0) {
8665 ret = mq_getattr(arg1, &posix_mq_attr_out);
8666 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8667 }
8668 if (arg2 != 0) {
8669 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8670 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8671 }
8672
8673 }
8674 break;
8675 #endif
8676
8677 #ifdef CONFIG_SPLICE
8678 #ifdef TARGET_NR_tee
8679 case TARGET_NR_tee:
8680 {
8681 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8682 }
8683 break;
8684 #endif
8685 #ifdef TARGET_NR_splice
8686 case TARGET_NR_splice:
8687 {
8688 loff_t loff_in, loff_out;
8689 loff_t *ploff_in = NULL, *ploff_out = NULL;
8690 if(arg2) {
8691 get_user_u64(loff_in, arg2);
8692 ploff_in = &loff_in;
8693 }
8694 if(arg4) {
8695                 get_user_u64(loff_out, arg4);
8696 ploff_out = &loff_out;
8697 }
8698 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8699 }
8700 break;
8701 #endif
8702 #ifdef TARGET_NR_vmsplice
8703 case TARGET_NR_vmsplice:
8704 {
8705 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8706 if (vec != NULL) {
8707 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8708 unlock_iovec(vec, arg2, arg3, 0);
8709 } else {
8710 ret = -host_to_target_errno(errno);
8711 }
8712 }
8713 break;
8714 #endif
8715 #endif /* CONFIG_SPLICE */
8716 #ifdef CONFIG_EVENTFD
8717 #if defined(TARGET_NR_eventfd)
8718 case TARGET_NR_eventfd:
8719 ret = get_errno(eventfd(arg1, 0));
8720 break;
8721 #endif
8722 #if defined(TARGET_NR_eventfd2)
8723 case TARGET_NR_eventfd2:
8724 ret = get_errno(eventfd(arg1, arg2));
8725 break;
8726 #endif
8727 #endif /* CONFIG_EVENTFD */
8728 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8729 case TARGET_NR_fallocate:
8730 #if TARGET_ABI_BITS == 32
8731 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8732 target_offset64(arg5, arg6)));
8733 #else
8734 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8735 #endif
8736 break;
8737 #endif
8738 #if defined(CONFIG_SYNC_FILE_RANGE)
8739 #if defined(TARGET_NR_sync_file_range)
8740 case TARGET_NR_sync_file_range:
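    /* On 32-bit ABIs the two 64-bit offsets each arrive split across a
       register pair; MIPS passes them one register later than other targets. */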
8741 #if TARGET_ABI_BITS == 32
8742 #if defined(TARGET_MIPS)
8743 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8744 target_offset64(arg5, arg6), arg7));
8745 #else
8746 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8747 target_offset64(arg4, arg5), arg6));
8748 #endif /* !TARGET_MIPS */
8749 #else
8750 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8751 #endif
8752 break;
8753 #endif
8754 #if defined(TARGET_NR_sync_file_range2)
8755 case TARGET_NR_sync_file_range2:
8756 /* This is like sync_file_range but the arguments are reordered */
8757 #if TARGET_ABI_BITS == 32
8758 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8759 target_offset64(arg5, arg6), arg2));
8760 #else
8761 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8762 #endif
8763 break;
8764 #endif
8765 #endif
8766 #if defined(CONFIG_EPOLL)
8767 #if defined(TARGET_NR_epoll_create)
8768 case TARGET_NR_epoll_create:
8769 ret = get_errno(epoll_create(arg1));
8770 break;
8771 #endif
8772 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8773 case TARGET_NR_epoll_create1:
8774 ret = get_errno(epoll_create1(arg1));
8775 break;
8776 #endif
8777 #if defined(TARGET_NR_epoll_ctl)
8778 case TARGET_NR_epoll_ctl:
8779 {
8780 struct epoll_event ep;
8781 struct epoll_event *epp = 0;
8782 if (arg4) {
8783 struct target_epoll_event *target_ep;
8784 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8785 goto efault;
8786 }
8787 ep.events = tswap32(target_ep->events);
8788 /* The epoll_data_t union is just opaque data to the kernel,
8789 * so we transfer all 64 bits across and need not worry what
8790 * actual data type it is.
8791 */
8792 ep.data.u64 = tswap64(target_ep->data.u64);
8793 unlock_user_struct(target_ep, arg4, 0);
8794 epp = &ep;
8795 }
8796 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8797 break;
8798 }
8799 #endif
8800
8801 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8802 #define IMPLEMENT_EPOLL_PWAIT
8803 #endif
8804 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8805 #if defined(TARGET_NR_epoll_wait)
8806 case TARGET_NR_epoll_wait:
8807 #endif
8808 #if defined(IMPLEMENT_EPOLL_PWAIT)
8809 case TARGET_NR_epoll_pwait:
8810 #endif
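    /* Both variants share this body: lock the guest event array for writing,
       let the host fill a temporary array, then byte-swap the results back
       into the target epoll_event layout. */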
8811 {
8812 struct target_epoll_event *target_ep;
8813 struct epoll_event *ep;
8814 int epfd = arg1;
8815 int maxevents = arg3;
8816 int timeout = arg4;
8817
8818 target_ep = lock_user(VERIFY_WRITE, arg2,
8819 maxevents * sizeof(struct target_epoll_event), 1);
8820 if (!target_ep) {
8821 goto efault;
8822 }
8823
8824 ep = alloca(maxevents * sizeof(struct epoll_event));
8825
8826 switch (num) {
8827 #if defined(IMPLEMENT_EPOLL_PWAIT)
8828 case TARGET_NR_epoll_pwait:
8829 {
8830 target_sigset_t *target_set;
8831 sigset_t _set, *set = &_set;
8832
8833 if (arg5) {
8834 target_set = lock_user(VERIFY_READ, arg5,
8835 sizeof(target_sigset_t), 1);
8836 if (!target_set) {
8837 unlock_user(target_ep, arg2, 0);
8838 goto efault;
8839 }
8840 target_to_host_sigset(set, target_set);
8841 unlock_user(target_set, arg5, 0);
8842 } else {
8843 set = NULL;
8844 }
8845
8846 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8847 break;
8848 }
8849 #endif
8850 #if defined(TARGET_NR_epoll_wait)
8851 case TARGET_NR_epoll_wait:
8852 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8853 break;
8854 #endif
8855 default:
8856 ret = -TARGET_ENOSYS;
8857 }
8858 if (!is_error(ret)) {
8859 int i;
8860 for (i = 0; i < ret; i++) {
8861 target_ep[i].events = tswap32(ep[i].events);
8862 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8863 }
8864 }
8865 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8866 break;
8867 }
8868 #endif
8869 #endif
8870 #ifdef TARGET_NR_prlimit64
8871 case TARGET_NR_prlimit64:
8872 {
8873 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8874 struct target_rlimit64 *target_rnew, *target_rold;
8875 struct host_rlimit64 rnew, rold, *rnewp = 0;
8876 if (arg3) {
8877 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8878 goto efault;
8879 }
8880 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8881 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8882 unlock_user_struct(target_rnew, arg3, 0);
8883 rnewp = &rnew;
8884 }
8885
8886 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8887 if (!is_error(ret) && arg4) {
8888 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8889 goto efault;
8890 }
8891 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8892 target_rold->rlim_max = tswap64(rold.rlim_max);
8893 unlock_user_struct(target_rold, arg4, 1);
8894 }
8895 break;
8896 }
8897 #endif
8898 #ifdef TARGET_NR_gethostname
8899 case TARGET_NR_gethostname:
8900 {
8901 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8902 if (name) {
8903 ret = get_errno(gethostname(name, arg2));
8904 unlock_user(name, arg1, arg2);
8905 } else {
8906 ret = -TARGET_EFAULT;
8907 }
8908 break;
8909 }
8910 #endif
8911 default:
8912 unimplemented:
8913 gemu_log("qemu: Unsupported syscall: %d\n", num);
8914 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8915 unimplemented_nowarn:
8916 #endif
8917 ret = -TARGET_ENOSYS;
8918 break;
8919 }
8920 fail:
8921 #ifdef DEBUG
8922 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8923 #endif
8924 if(do_strace)
8925 print_syscall_ret(num, ret);
8926 return ret;
8927 efault:
8928 ret = -TARGET_EFAULT;
8929 goto fail;
8930 }