1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81
82 #define termios host_termios
83 #define winsize host_winsize
84 #define termio host_termio
85 #define sgttyb host_sgttyb /* same as target */
86 #define tchars host_tchars /* same as target */
87 #define ltchars host_ltchars /* same as target */
88
89 #include <linux/termios.h>
90 #include <linux/unistd.h>
91 #include <linux/utsname.h>
92 #include <linux/cdrom.h>
93 #include <linux/hdreg.h>
94 #include <linux/soundcard.h>
95 #include <linux/kd.h>
96 #include <linux/mtio.h>
97 #include <linux/fs.h>
98 #if defined(CONFIG_FIEMAP)
99 #include <linux/fiemap.h>
100 #endif
101 #include <linux/fb.h>
102 #include <linux/vt.h>
103 #include <linux/dm-ioctl.h>
104 #include "linux_loop.h"
105 #include "cpu-uname.h"
106
107 #include "qemu.h"
108
109 #if defined(CONFIG_USE_NPTL)
110 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
111 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
112 #else
113 /* XXX: Hardcode the above values. */
114 #define CLONE_NPTL_FLAGS2 0
115 #endif
116
117 //#define DEBUG
118
119 //#include <linux/msdos_fs.h>
120 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
121 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
122
123
124 #undef _syscall0
125 #undef _syscall1
126 #undef _syscall2
127 #undef _syscall3
128 #undef _syscall4
129 #undef _syscall5
130 #undef _syscall6
131
132 #define _syscall0(type,name) \
133 static type name (void) \
134 { \
135 return syscall(__NR_##name); \
136 }
137
138 #define _syscall1(type,name,type1,arg1) \
139 static type name (type1 arg1) \
140 { \
141 return syscall(__NR_##name, arg1); \
142 }
143
144 #define _syscall2(type,name,type1,arg1,type2,arg2) \
145 static type name (type1 arg1,type2 arg2) \
146 { \
147 return syscall(__NR_##name, arg1, arg2); \
148 }
149
150 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
151 static type name (type1 arg1,type2 arg2,type3 arg3) \
152 { \
153 return syscall(__NR_##name, arg1, arg2, arg3); \
154 }
155
156 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
157 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
158 { \
159 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
160 }
161
162 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
165 { \
166 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
167 }
168
169
170 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
171 type5,arg5,type6,arg6) \
172 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
173 type6 arg6) \
174 { \
175 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
176 }
177
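/*
 * Each _syscallN macro above defines a static wrapper around the raw host
 * syscall.  For example,
 *     _syscall2(int, sys_tkill, int, tid, int, sig)
 * expands to
 *     static int sys_tkill(int tid, int sig)
 *     { return syscall(__NR_sys_tkill, tid, sig); }
 * where __NR_sys_tkill is aliased to the host __NR_tkill below.
 */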
178
179 #define __NR_sys_uname __NR_uname
180 #define __NR_sys_faccessat __NR_faccessat
181 #define __NR_sys_fchmodat __NR_fchmodat
182 #define __NR_sys_fchownat __NR_fchownat
183 #define __NR_sys_fstatat64 __NR_fstatat64
184 #define __NR_sys_futimesat __NR_futimesat
185 #define __NR_sys_getcwd1 __NR_getcwd
186 #define __NR_sys_getdents __NR_getdents
187 #define __NR_sys_getdents64 __NR_getdents64
188 #define __NR_sys_getpriority __NR_getpriority
189 #define __NR_sys_linkat __NR_linkat
190 #define __NR_sys_mkdirat __NR_mkdirat
191 #define __NR_sys_mknodat __NR_mknodat
192 #define __NR_sys_newfstatat __NR_newfstatat
193 #define __NR_sys_openat __NR_openat
194 #define __NR_sys_readlinkat __NR_readlinkat
195 #define __NR_sys_renameat __NR_renameat
196 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
197 #define __NR_sys_symlinkat __NR_symlinkat
198 #define __NR_sys_syslog __NR_syslog
199 #define __NR_sys_tgkill __NR_tgkill
200 #define __NR_sys_tkill __NR_tkill
201 #define __NR_sys_unlinkat __NR_unlinkat
202 #define __NR_sys_utimensat __NR_utimensat
203 #define __NR_sys_futex __NR_futex
204 #define __NR_sys_inotify_init __NR_inotify_init
205 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
206 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
207
208 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
209 defined(__s390x__)
210 #define __NR__llseek __NR_lseek
211 #endif
212
213 #ifdef __NR_gettid
214 _syscall0(int, gettid)
215 #else
216 /* This is a replacement for the host gettid() and must return a host
217 errno. */
218 static int gettid(void) {
219 return -ENOSYS;
220 }
221 #endif
222 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
223 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
224 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
225 #endif
226 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
227 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
228 loff_t *, res, uint, wh);
229 #endif
230 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
231 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
232 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
233 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
234 #endif
235 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
236 _syscall2(int,sys_tkill,int,tid,int,sig)
237 #endif
238 #ifdef __NR_exit_group
239 _syscall1(int,exit_group,int,error_code)
240 #endif
241 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
242 _syscall1(int,set_tid_address,int *,tidptr)
243 #endif
244 #if defined(CONFIG_USE_NPTL)
245 #if defined(TARGET_NR_futex) && defined(__NR_futex)
246 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
247 const struct timespec *,timeout,int *,uaddr2,int,val3)
248 #endif
249 #endif
250 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
251 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
252 unsigned long *, user_mask_ptr);
253 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
254 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
255 unsigned long *, user_mask_ptr);
256 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
257 void *, arg);
258
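/*
 * Translation table for the open(2)/fcntl(2) file status flags.  Each entry
 * holds a (mask, bits) pair for the target encoding and the corresponding
 * (mask, bits) pair for the host encoding; the all-zero entry terminates
 * the table.
 */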
259 static bitmask_transtbl fcntl_flags_tbl[] = {
260 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
261 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
262 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
263 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
264 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
265 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
266 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
267 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
268 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
269 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
270 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
271 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
272 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
275 #endif
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
278 #endif
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
281 #endif
282 #if defined(O_PATH)
283 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
284 #endif
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
288 #endif
289 { 0, 0, 0, 0 }
290 };
291
292 #define COPY_UTSNAME_FIELD(dest, src) \
293 do { \
294 /* __NEW_UTS_LEN doesn't include terminating null */ \
295 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
296 (dest)[__NEW_UTS_LEN] = '\0'; \
297 } while (0)
298
299 static int sys_uname(struct new_utsname *buf)
300 {
301 struct utsname uts_buf;
302
303 if (uname(&uts_buf) < 0)
304 return (-1);
305
306 /*
307 * Just in case these have some differences, we
308 * translate utsname to new_utsname (which is the
309 * struct the Linux kernel uses).
310 */
311
312 memset(buf, 0, sizeof(*buf));
313 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
314 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
315 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
316 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
317 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
318 #ifdef _GNU_SOURCE
319 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
320 #endif
321 return (0);
322
323 #undef COPY_UTSNAME_FIELD
324 }
325
326 static int sys_getcwd1(char *buf, size_t size)
327 {
328 if (getcwd(buf, size) == NULL) {
329 /* getcwd() sets errno */
330 return (-1);
331 }
332 return strlen(buf)+1;
333 }
334
335 #ifdef CONFIG_ATFILE
336 /*
337 * Host system seems to have atfile syscall stubs available. We
338 * now enable them one by one as specified by target syscall_nr.h.
339 */
340
341 #ifdef TARGET_NR_faccessat
342 static int sys_faccessat(int dirfd, const char *pathname, int mode)
343 {
344 return (faccessat(dirfd, pathname, mode, 0));
345 }
346 #endif
347 #ifdef TARGET_NR_fchmodat
348 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
349 {
350 return (fchmodat(dirfd, pathname, mode, 0));
351 }
352 #endif
353 #if defined(TARGET_NR_fchownat)
354 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
355 gid_t group, int flags)
356 {
357 return (fchownat(dirfd, pathname, owner, group, flags));
358 }
359 #endif
360 #ifdef __NR_fstatat64
361 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
362 int flags)
363 {
364 return (fstatat(dirfd, pathname, buf, flags));
365 }
366 #endif
367 #ifdef __NR_newfstatat
368 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
369 int flags)
370 {
371 return (fstatat(dirfd, pathname, buf, flags));
372 }
373 #endif
374 #ifdef TARGET_NR_futimesat
375 static int sys_futimesat(int dirfd, const char *pathname,
376 const struct timeval times[2])
377 {
378 return (futimesat(dirfd, pathname, times));
379 }
380 #endif
381 #ifdef TARGET_NR_linkat
382 static int sys_linkat(int olddirfd, const char *oldpath,
383 int newdirfd, const char *newpath, int flags)
384 {
385 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
386 }
387 #endif
388 #ifdef TARGET_NR_mkdirat
389 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
390 {
391 return (mkdirat(dirfd, pathname, mode));
392 }
393 #endif
394 #ifdef TARGET_NR_mknodat
395 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
396 dev_t dev)
397 {
398 return (mknodat(dirfd, pathname, mode, dev));
399 }
400 #endif
401 #ifdef TARGET_NR_openat
402 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
403 {
404 /*
405 * open(2) has extra parameter 'mode' when called with
406 * flag O_CREAT.
407 */
408 if ((flags & O_CREAT) != 0) {
409 return (openat(dirfd, pathname, flags, mode));
410 }
411 return (openat(dirfd, pathname, flags));
412 }
413 #endif
414 #ifdef TARGET_NR_readlinkat
415 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
416 {
417 return (readlinkat(dirfd, pathname, buf, bufsiz));
418 }
419 #endif
420 #ifdef TARGET_NR_renameat
421 static int sys_renameat(int olddirfd, const char *oldpath,
422 int newdirfd, const char *newpath)
423 {
424 return (renameat(olddirfd, oldpath, newdirfd, newpath));
425 }
426 #endif
427 #ifdef TARGET_NR_symlinkat
428 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
429 {
430 return (symlinkat(oldpath, newdirfd, newpath));
431 }
432 #endif
433 #ifdef TARGET_NR_unlinkat
434 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
435 {
436 return (unlinkat(dirfd, pathname, flags));
437 }
438 #endif
439 #else /* !CONFIG_ATFILE */
440
441 /*
442 * Try direct syscalls instead
443 */
444 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
445 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
446 #endif
447 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
448 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
449 #endif
450 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
451 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
452 uid_t,owner,gid_t,group,int,flags)
453 #endif
454 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
455 defined(__NR_fstatat64)
456 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
457 struct stat *,buf,int,flags)
458 #endif
459 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
460 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
461 const struct timeval *,times)
462 #endif
463 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
464 defined(__NR_newfstatat)
465 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
466 struct stat *,buf,int,flags)
467 #endif
468 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
469 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
470 int,newdirfd,const char *,newpath,int,flags)
471 #endif
472 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
473 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
474 #endif
475 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
476 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
477 mode_t,mode,dev_t,dev)
478 #endif
479 #if defined(TARGET_NR_openat) && defined(__NR_openat)
480 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
481 #endif
482 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
483 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
484 char *,buf,size_t,bufsize)
485 #endif
486 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
487 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
488 int,newdirfd,const char *,newpath)
489 #endif
490 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
491 _syscall3(int,sys_symlinkat,const char *,oldpath,
492 int,newdirfd,const char *,newpath)
493 #endif
494 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
495 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
496 #endif
497
498 #endif /* CONFIG_ATFILE */
499
500 #ifdef CONFIG_UTIMENSAT
501 static int sys_utimensat(int dirfd, const char *pathname,
502 const struct timespec times[2], int flags)
503 {
504 if (pathname == NULL)
505 return futimens(dirfd, times);
506 else
507 return utimensat(dirfd, pathname, times, flags);
508 }
509 #else
510 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
511 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
512 const struct timespec *,tsp,int,flags)
513 #endif
514 #endif /* CONFIG_UTIMENSAT */
515
516 #ifdef CONFIG_INOTIFY
517 #include <sys/inotify.h>
518
519 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
520 static int sys_inotify_init(void)
521 {
522 return (inotify_init());
523 }
524 #endif
525 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
526 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
527 {
528 return (inotify_add_watch(fd, pathname, mask));
529 }
530 #endif
531 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
532 static int sys_inotify_rm_watch(int fd, int32_t wd)
533 {
534 return (inotify_rm_watch(fd, wd));
535 }
536 #endif
537 #ifdef CONFIG_INOTIFY1
538 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
539 static int sys_inotify_init1(int flags)
540 {
541 return (inotify_init1(flags));
542 }
543 #endif
544 #endif
545 #else
546 /* Userspace can usually survive runtime without inotify */
547 #undef TARGET_NR_inotify_init
548 #undef TARGET_NR_inotify_init1
549 #undef TARGET_NR_inotify_add_watch
550 #undef TARGET_NR_inotify_rm_watch
551 #endif /* CONFIG_INOTIFY */
552
553 #if defined(TARGET_NR_ppoll)
554 #ifndef __NR_ppoll
555 # define __NR_ppoll -1
556 #endif
557 #define __NR_sys_ppoll __NR_ppoll
558 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
559 struct timespec *, timeout, const __sigset_t *, sigmask,
560 size_t, sigsetsize)
561 #endif
562
563 #if defined(TARGET_NR_pselect6)
564 #ifndef __NR_pselect6
565 # define __NR_pselect6 -1
566 #endif
567 #define __NR_sys_pselect6 __NR_pselect6
568 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
569 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
570 #endif
571
572 #if defined(TARGET_NR_prlimit64)
573 #ifndef __NR_prlimit64
574 # define __NR_prlimit64 -1
575 #endif
576 #define __NR_sys_prlimit64 __NR_prlimit64
577 /* The glibc rlimit structure may not be the one used by the underlying syscall */
578 struct host_rlimit64 {
579 uint64_t rlim_cur;
580 uint64_t rlim_max;
581 };
582 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
583 const struct host_rlimit64 *, new_limit,
584 struct host_rlimit64 *, old_limit)
585 #endif
586
587 /* ARM EABI and MIPS expect 64-bit types to be aligned on even register pairs */
588 #ifdef TARGET_ARM
589 static inline int regpairs_aligned(void *cpu_env) {
590 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
591 }
592 #elif defined(TARGET_MIPS)
593 static inline int regpairs_aligned(void *cpu_env) { return 1; }
594 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
595 /* SysV ABI for PPC32 expects 64-bit parameters to be passed on odd/even pairs
596 * of registers which translates to the same as ARM/MIPS, because we start with
597 * r3 as arg1 */
598 static inline int regpairs_aligned(void *cpu_env) { return 1; }
599 #else
600 static inline int regpairs_aligned(void *cpu_env) { return 0; }
601 #endif
602
603 #define ERRNO_TABLE_SIZE 1200
604
605 /* target_to_host_errno_table[] is initialized from
606 * host_to_target_errno_table[] in syscall_init(). */
607 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
608 };
609
610 /*
611 * This list is the union of errno values overridden in asm-<arch>/errno.h
612 * minus the errnos that are not actually generic to all archs.
613 */
614 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
615 [EIDRM] = TARGET_EIDRM,
616 [ECHRNG] = TARGET_ECHRNG,
617 [EL2NSYNC] = TARGET_EL2NSYNC,
618 [EL3HLT] = TARGET_EL3HLT,
619 [EL3RST] = TARGET_EL3RST,
620 [ELNRNG] = TARGET_ELNRNG,
621 [EUNATCH] = TARGET_EUNATCH,
622 [ENOCSI] = TARGET_ENOCSI,
623 [EL2HLT] = TARGET_EL2HLT,
624 [EDEADLK] = TARGET_EDEADLK,
625 [ENOLCK] = TARGET_ENOLCK,
626 [EBADE] = TARGET_EBADE,
627 [EBADR] = TARGET_EBADR,
628 [EXFULL] = TARGET_EXFULL,
629 [ENOANO] = TARGET_ENOANO,
630 [EBADRQC] = TARGET_EBADRQC,
631 [EBADSLT] = TARGET_EBADSLT,
632 [EBFONT] = TARGET_EBFONT,
633 [ENOSTR] = TARGET_ENOSTR,
634 [ENODATA] = TARGET_ENODATA,
635 [ETIME] = TARGET_ETIME,
636 [ENOSR] = TARGET_ENOSR,
637 [ENONET] = TARGET_ENONET,
638 [ENOPKG] = TARGET_ENOPKG,
639 [EREMOTE] = TARGET_EREMOTE,
640 [ENOLINK] = TARGET_ENOLINK,
641 [EADV] = TARGET_EADV,
642 [ESRMNT] = TARGET_ESRMNT,
643 [ECOMM] = TARGET_ECOMM,
644 [EPROTO] = TARGET_EPROTO,
645 [EDOTDOT] = TARGET_EDOTDOT,
646 [EMULTIHOP] = TARGET_EMULTIHOP,
647 [EBADMSG] = TARGET_EBADMSG,
648 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
649 [EOVERFLOW] = TARGET_EOVERFLOW,
650 [ENOTUNIQ] = TARGET_ENOTUNIQ,
651 [EBADFD] = TARGET_EBADFD,
652 [EREMCHG] = TARGET_EREMCHG,
653 [ELIBACC] = TARGET_ELIBACC,
654 [ELIBBAD] = TARGET_ELIBBAD,
655 [ELIBSCN] = TARGET_ELIBSCN,
656 [ELIBMAX] = TARGET_ELIBMAX,
657 [ELIBEXEC] = TARGET_ELIBEXEC,
658 [EILSEQ] = TARGET_EILSEQ,
659 [ENOSYS] = TARGET_ENOSYS,
660 [ELOOP] = TARGET_ELOOP,
661 [ERESTART] = TARGET_ERESTART,
662 [ESTRPIPE] = TARGET_ESTRPIPE,
663 [ENOTEMPTY] = TARGET_ENOTEMPTY,
664 [EUSERS] = TARGET_EUSERS,
665 [ENOTSOCK] = TARGET_ENOTSOCK,
666 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
667 [EMSGSIZE] = TARGET_EMSGSIZE,
668 [EPROTOTYPE] = TARGET_EPROTOTYPE,
669 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
670 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
671 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
672 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
673 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
674 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
675 [EADDRINUSE] = TARGET_EADDRINUSE,
676 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
677 [ENETDOWN] = TARGET_ENETDOWN,
678 [ENETUNREACH] = TARGET_ENETUNREACH,
679 [ENETRESET] = TARGET_ENETRESET,
680 [ECONNABORTED] = TARGET_ECONNABORTED,
681 [ECONNRESET] = TARGET_ECONNRESET,
682 [ENOBUFS] = TARGET_ENOBUFS,
683 [EISCONN] = TARGET_EISCONN,
684 [ENOTCONN] = TARGET_ENOTCONN,
685 [EUCLEAN] = TARGET_EUCLEAN,
686 [ENOTNAM] = TARGET_ENOTNAM,
687 [ENAVAIL] = TARGET_ENAVAIL,
688 [EISNAM] = TARGET_EISNAM,
689 [EREMOTEIO] = TARGET_EREMOTEIO,
690 [ESHUTDOWN] = TARGET_ESHUTDOWN,
691 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
692 [ETIMEDOUT] = TARGET_ETIMEDOUT,
693 [ECONNREFUSED] = TARGET_ECONNREFUSED,
694 [EHOSTDOWN] = TARGET_EHOSTDOWN,
695 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
696 [EALREADY] = TARGET_EALREADY,
697 [EINPROGRESS] = TARGET_EINPROGRESS,
698 [ESTALE] = TARGET_ESTALE,
699 [ECANCELED] = TARGET_ECANCELED,
700 [ENOMEDIUM] = TARGET_ENOMEDIUM,
701 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
702 #ifdef ENOKEY
703 [ENOKEY] = TARGET_ENOKEY,
704 #endif
705 #ifdef EKEYEXPIRED
706 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
707 #endif
708 #ifdef EKEYREVOKED
709 [EKEYREVOKED] = TARGET_EKEYREVOKED,
710 #endif
711 #ifdef EKEYREJECTED
712 [EKEYREJECTED] = TARGET_EKEYREJECTED,
713 #endif
714 #ifdef EOWNERDEAD
715 [EOWNERDEAD] = TARGET_EOWNERDEAD,
716 #endif
717 #ifdef ENOTRECOVERABLE
718 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
719 #endif
720 };
721
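/*
 * Errno conversion helpers: values without a table entry are assumed to be
 * identical on host and target and are returned unchanged.  get_errno()
 * turns a failed host call (return value -1) into a negative target errno.
 */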
722 static inline int host_to_target_errno(int err)
723 {
724 if(host_to_target_errno_table[err])
725 return host_to_target_errno_table[err];
726 return err;
727 }
728
729 static inline int target_to_host_errno(int err)
730 {
731 if (target_to_host_errno_table[err])
732 return target_to_host_errno_table[err];
733 return err;
734 }
735
736 static inline abi_long get_errno(abi_long ret)
737 {
738 if (ret == -1)
739 return -host_to_target_errno(errno);
740 else
741 return ret;
742 }
743
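/* Syscall return values in the range [-4096, -1] are treated as errors,
 * mirroring the kernel's negative-errno convention. */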
744 static inline int is_error(abi_long ret)
745 {
746 return (abi_ulong)ret >= (abi_ulong)(-4096);
747 }
748
749 char *target_strerror(int err)
750 {
751 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
752 return NULL;
753 }
754 return strerror(target_to_host_errno(err));
755 }
756
757 static abi_ulong target_brk;
758 static abi_ulong target_original_brk;
759 static abi_ulong brk_page;
760
761 void target_set_brk(abi_ulong new_brk)
762 {
763 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
764 brk_page = HOST_PAGE_ALIGN(target_brk);
765 }
766
767 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
768 #define DEBUGF_BRK(message, args...)
769
770 /* do_brk() must return target values and target errnos. */
771 abi_long do_brk(abi_ulong new_brk)
772 {
773 abi_long mapped_addr;
774 int new_alloc_size;
775
776 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
777
778 if (!new_brk) {
779 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
780 return target_brk;
781 }
782 if (new_brk < target_original_brk) {
783 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
784 target_brk);
785 return target_brk;
786 }
787
788 /* If the new brk is less than the highest page reserved to the
789 * target heap allocation, set it and we're almost done... */
790 if (new_brk <= brk_page) {
791 /* Heap contents are initialized to zero, as for anonymous
792 * mapped pages. */
793 if (new_brk > target_brk) {
794 memset(g2h(target_brk), 0, new_brk - target_brk);
795 }
796 target_brk = new_brk;
797 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
798 return target_brk;
799 }
800
801 /* We need to allocate more memory after the brk... Note that
802 * we don't use MAP_FIXED because that will map over the top of
803 * any existing mapping (like the one with the host libc or qemu
804 * itself); instead we treat "mapped but at wrong address" as
805 * a failure and unmap again.
806 */
807 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
808 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
809 PROT_READ|PROT_WRITE,
810 MAP_ANON|MAP_PRIVATE, 0, 0));
811
812 if (mapped_addr == brk_page) {
813 /* Heap contents are initialized to zero, as for anonymous
814 * mapped pages. Technically the new pages are already
815 * initialized to zero since they *are* anonymous mapped
816 * pages, however we have to take care with the contents that
817 * come from the remaining part of the previous page: it may
818 * contain garbage data due to previous heap usage (grown
819 * then shrunk). */
820 memset(g2h(target_brk), 0, brk_page - target_brk);
821
822 target_brk = new_brk;
823 brk_page = HOST_PAGE_ALIGN(target_brk);
824 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
825 target_brk);
826 return target_brk;
827 } else if (mapped_addr != -1) {
828 /* Mapped but at wrong address, meaning there wasn't actually
829 * enough space for this brk.
830 */
831 target_munmap(mapped_addr, new_alloc_size);
832 mapped_addr = -1;
833 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
834 }
835 else {
836 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
837 }
838
839 #if defined(TARGET_ALPHA)
840 /* We (partially) emulate OSF/1 on Alpha, which requires we
841 return a proper errno, not an unchanged brk value. */
842 return -TARGET_ENOMEM;
843 #endif
844 /* For everything else, return the previous break. */
845 return target_brk;
846 }
847
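/*
 * fd_set conversion helpers: the guest stores an fd_set as an array of
 * abi_ulong bit words, so each bit is extracted (or set) individually and
 * transferred to (or from) the host fd_set.
 */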
848 static inline abi_long copy_from_user_fdset(fd_set *fds,
849 abi_ulong target_fds_addr,
850 int n)
851 {
852 int i, nw, j, k;
853 abi_ulong b, *target_fds;
854
855 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
856 if (!(target_fds = lock_user(VERIFY_READ,
857 target_fds_addr,
858 sizeof(abi_ulong) * nw,
859 1)))
860 return -TARGET_EFAULT;
861
862 FD_ZERO(fds);
863 k = 0;
864 for (i = 0; i < nw; i++) {
865 /* grab the abi_ulong */
866 __get_user(b, &target_fds[i]);
867 for (j = 0; j < TARGET_ABI_BITS; j++) {
868 /* check the bit inside the abi_ulong */
869 if ((b >> j) & 1)
870 FD_SET(k, fds);
871 k++;
872 }
873 }
874
875 unlock_user(target_fds, target_fds_addr, 0);
876
877 return 0;
878 }
879
880 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
881 abi_ulong target_fds_addr,
882 int n)
883 {
884 if (target_fds_addr) {
885 if (copy_from_user_fdset(fds, target_fds_addr, n))
886 return -TARGET_EFAULT;
887 *fds_ptr = fds;
888 } else {
889 *fds_ptr = NULL;
890 }
891 return 0;
892 }
893
894 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
895 const fd_set *fds,
896 int n)
897 {
898 int i, nw, j, k;
899 abi_long v;
900 abi_ulong *target_fds;
901
902 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
903 if (!(target_fds = lock_user(VERIFY_WRITE,
904 target_fds_addr,
905 sizeof(abi_ulong) * nw,
906 0)))
907 return -TARGET_EFAULT;
908
909 k = 0;
910 for (i = 0; i < nw; i++) {
911 v = 0;
912 for (j = 0; j < TARGET_ABI_BITS; j++) {
913 v |= ((FD_ISSET(k, fds) != 0) << j);
914 k++;
915 }
916 __put_user(v, &target_fds[i]);
917 }
918
919 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
920
921 return 0;
922 }
923
924 #if defined(__alpha__)
925 #define HOST_HZ 1024
926 #else
927 #define HOST_HZ 100
928 #endif
929
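/* Rescale a clock_t tick count from the host's HZ to the target's HZ
 * when the two differ. */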
930 static inline abi_long host_to_target_clock_t(long ticks)
931 {
932 #if HOST_HZ == TARGET_HZ
933 return ticks;
934 #else
935 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
936 #endif
937 }
938
939 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
940 const struct rusage *rusage)
941 {
942 struct target_rusage *target_rusage;
943
944 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
945 return -TARGET_EFAULT;
946 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
947 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
948 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
949 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
950 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
951 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
952 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
953 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
954 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
955 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
956 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
957 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
958 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
959 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
960 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
961 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
962 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
963 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
964 unlock_user_struct(target_rusage, target_addr, 1);
965
966 return 0;
967 }
968
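/*
 * rlim_t conversion helpers: a value equal to the respective RLIM_INFINITY
 * constant, or one that cannot be represented on the other side, is mapped
 * to infinity.
 */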
969 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
970 {
971 abi_ulong target_rlim_swap;
972 rlim_t result;
973
974 target_rlim_swap = tswapal(target_rlim);
975 if (target_rlim_swap == TARGET_RLIM_INFINITY)
976 return RLIM_INFINITY;
977
978 result = target_rlim_swap;
979 if (target_rlim_swap != (rlim_t)result)
980 return RLIM_INFINITY;
981
982 return result;
983 }
984
985 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
986 {
987 abi_ulong target_rlim_swap;
988 abi_ulong result;
989
990 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
991 target_rlim_swap = TARGET_RLIM_INFINITY;
992 else
993 target_rlim_swap = rlim;
994 result = tswapal(target_rlim_swap);
995
996 return result;
997 }
998
999 static inline int target_to_host_resource(int code)
1000 {
1001 switch (code) {
1002 case TARGET_RLIMIT_AS:
1003 return RLIMIT_AS;
1004 case TARGET_RLIMIT_CORE:
1005 return RLIMIT_CORE;
1006 case TARGET_RLIMIT_CPU:
1007 return RLIMIT_CPU;
1008 case TARGET_RLIMIT_DATA:
1009 return RLIMIT_DATA;
1010 case TARGET_RLIMIT_FSIZE:
1011 return RLIMIT_FSIZE;
1012 case TARGET_RLIMIT_LOCKS:
1013 return RLIMIT_LOCKS;
1014 case TARGET_RLIMIT_MEMLOCK:
1015 return RLIMIT_MEMLOCK;
1016 case TARGET_RLIMIT_MSGQUEUE:
1017 return RLIMIT_MSGQUEUE;
1018 case TARGET_RLIMIT_NICE:
1019 return RLIMIT_NICE;
1020 case TARGET_RLIMIT_NOFILE:
1021 return RLIMIT_NOFILE;
1022 case TARGET_RLIMIT_NPROC:
1023 return RLIMIT_NPROC;
1024 case TARGET_RLIMIT_RSS:
1025 return RLIMIT_RSS;
1026 case TARGET_RLIMIT_RTPRIO:
1027 return RLIMIT_RTPRIO;
1028 case TARGET_RLIMIT_SIGPENDING:
1029 return RLIMIT_SIGPENDING;
1030 case TARGET_RLIMIT_STACK:
1031 return RLIMIT_STACK;
1032 default:
1033 return code;
1034 }
1035 }
1036
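/* Copy a struct timeval between guest and host memory, byte-swapping the
 * fields as needed. */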
1037 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1038 abi_ulong target_tv_addr)
1039 {
1040 struct target_timeval *target_tv;
1041
1042 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1043 return -TARGET_EFAULT;
1044
1045 __get_user(tv->tv_sec, &target_tv->tv_sec);
1046 __get_user(tv->tv_usec, &target_tv->tv_usec);
1047
1048 unlock_user_struct(target_tv, target_tv_addr, 0);
1049
1050 return 0;
1051 }
1052
1053 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1054 const struct timeval *tv)
1055 {
1056 struct target_timeval *target_tv;
1057
1058 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1059 return -TARGET_EFAULT;
1060
1061 __put_user(tv->tv_sec, &target_tv->tv_sec);
1062 __put_user(tv->tv_usec, &target_tv->tv_usec);
1063
1064 unlock_user_struct(target_tv, target_tv_addr, 1);
1065
1066 return 0;
1067 }
1068
1069 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1070 #include <mqueue.h>
1071
1072 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1073 abi_ulong target_mq_attr_addr)
1074 {
1075 struct target_mq_attr *target_mq_attr;
1076
1077 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1078 target_mq_attr_addr, 1))
1079 return -TARGET_EFAULT;
1080
1081 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1082 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1083 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1084 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1085
1086 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1087
1088 return 0;
1089 }
1090
1091 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1092 const struct mq_attr *attr)
1093 {
1094 struct target_mq_attr *target_mq_attr;
1095
1096 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1097 target_mq_attr_addr, 0))
1098 return -TARGET_EFAULT;
1099
1100 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1101 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1102 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1103 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1104
1105 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1106
1107 return 0;
1108 }
1109 #endif
1110
1111 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1112 /* do_select() must return target values and target errnos. */
1113 static abi_long do_select(int n,
1114 abi_ulong rfd_addr, abi_ulong wfd_addr,
1115 abi_ulong efd_addr, abi_ulong target_tv_addr)
1116 {
1117 fd_set rfds, wfds, efds;
1118 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1119 struct timeval tv, *tv_ptr;
1120 abi_long ret;
1121
1122 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1123 if (ret) {
1124 return ret;
1125 }
1126 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1127 if (ret) {
1128 return ret;
1129 }
1130 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1131 if (ret) {
1132 return ret;
1133 }
1134
1135 if (target_tv_addr) {
1136 if (copy_from_user_timeval(&tv, target_tv_addr))
1137 return -TARGET_EFAULT;
1138 tv_ptr = &tv;
1139 } else {
1140 tv_ptr = NULL;
1141 }
1142
1143 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1144
1145 if (!is_error(ret)) {
1146 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1147 return -TARGET_EFAULT;
1148 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1149 return -TARGET_EFAULT;
1150 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1151 return -TARGET_EFAULT;
1152
1153 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1154 return -TARGET_EFAULT;
1155 }
1156
1157 return ret;
1158 }
1159 #endif
1160
1161 static abi_long do_pipe2(int host_pipe[], int flags)
1162 {
1163 #ifdef CONFIG_PIPE2
1164 return pipe2(host_pipe, flags);
1165 #else
1166 return -ENOSYS;
1167 #endif
1168 }
1169
1170 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1171 int flags, int is_pipe2)
1172 {
1173 int host_pipe[2];
1174 abi_long ret;
1175 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1176
1177 if (is_error(ret))
1178 return get_errno(ret);
1179
1180 /* Several targets have special calling conventions for the original
1181 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1182 if (!is_pipe2) {
1183 #if defined(TARGET_ALPHA)
1184 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1185 return host_pipe[0];
1186 #elif defined(TARGET_MIPS)
1187 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1188 return host_pipe[0];
1189 #elif defined(TARGET_SH4)
1190 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1191 return host_pipe[0];
1192 #endif
1193 }
1194
1195 if (put_user_s32(host_pipe[0], pipedes)
1196 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1197 return -TARGET_EFAULT;
1198 return get_errno(ret);
1199 }
1200
1201 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1202 abi_ulong target_addr,
1203 socklen_t len)
1204 {
1205 struct target_ip_mreqn *target_smreqn;
1206
1207 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1208 if (!target_smreqn)
1209 return -TARGET_EFAULT;
1210 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1211 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1212 if (len == sizeof(struct target_ip_mreqn))
1213 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1214 unlock_user(target_smreqn, target_addr, 0);
1215
1216 return 0;
1217 }
1218
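/* Copy a sockaddr from guest memory to the host, byte-swapping sa_family
 * and applying the AF_UNIX sun_path length fixup described below. */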
1219 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1220 abi_ulong target_addr,
1221 socklen_t len)
1222 {
1223 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1224 sa_family_t sa_family;
1225 struct target_sockaddr *target_saddr;
1226
1227 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1228 if (!target_saddr)
1229 return -TARGET_EFAULT;
1230
1231 sa_family = tswap16(target_saddr->sa_family);
1232
1233 /* Oops. The caller might send an incomplete sun_path; sun_path
1234 * must be terminated by \0 (see the manual page), but
1235 * unfortunately it is quite common to specify sockaddr_un
1236 * length as "strlen(x->sun_path)" while it should be
1237 * "strlen(...) + 1". We'll fix that here if needed.
1238 * The Linux kernel has a similar feature.
1239 */
1240
1241 if (sa_family == AF_UNIX) {
1242 if (len < unix_maxlen && len > 0) {
1243 char *cp = (char*)target_saddr;
1244
1245 if ( cp[len-1] && !cp[len] )
1246 len++;
1247 }
1248 if (len > unix_maxlen)
1249 len = unix_maxlen;
1250 }
1251
1252 memcpy(addr, target_saddr, len);
1253 addr->sa_family = sa_family;
1254 unlock_user(target_saddr, target_addr, 0);
1255
1256 return 0;
1257 }
1258
1259 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1260 struct sockaddr *addr,
1261 socklen_t len)
1262 {
1263 struct target_sockaddr *target_saddr;
1264
1265 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1266 if (!target_saddr)
1267 return -TARGET_EFAULT;
1268 memcpy(target_saddr, addr, len);
1269 target_saddr->sa_family = tswap16(addr->sa_family);
1270 unlock_user(target_saddr, target_addr, len);
1271
1272 return 0;
1273 }
1274
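/*
 * Translate the ancillary data (control messages) of a msghdr from the
 * guest layout to the host layout.  Only SCM_RIGHTS (file descriptor
 * passing) payloads are converted; other types are copied verbatim with a
 * warning.
 */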
1275 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1276 struct target_msghdr *target_msgh)
1277 {
1278 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1279 abi_long msg_controllen;
1280 abi_ulong target_cmsg_addr;
1281 struct target_cmsghdr *target_cmsg;
1282 socklen_t space = 0;
1283
1284 msg_controllen = tswapal(target_msgh->msg_controllen);
1285 if (msg_controllen < sizeof (struct target_cmsghdr))
1286 goto the_end;
1287 target_cmsg_addr = tswapal(target_msgh->msg_control);
1288 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1289 if (!target_cmsg)
1290 return -TARGET_EFAULT;
1291
1292 while (cmsg && target_cmsg) {
1293 void *data = CMSG_DATA(cmsg);
1294 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1295
1296 int len = tswapal(target_cmsg->cmsg_len)
1297 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1298
1299 space += CMSG_SPACE(len);
1300 if (space > msgh->msg_controllen) {
1301 space -= CMSG_SPACE(len);
1302 gemu_log("Host cmsg overflow\n");
1303 break;
1304 }
1305
1306 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1307 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1308 cmsg->cmsg_len = CMSG_LEN(len);
1309
1310 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1311 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1312 memcpy(data, target_data, len);
1313 } else {
1314 int *fd = (int *)data;
1315 int *target_fd = (int *)target_data;
1316 int i, numfds = len / sizeof(int);
1317
1318 for (i = 0; i < numfds; i++)
1319 fd[i] = tswap32(target_fd[i]);
1320 }
1321
1322 cmsg = CMSG_NXTHDR(msgh, cmsg);
1323 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1324 }
1325 unlock_user(target_cmsg, target_cmsg_addr, 0);
1326 the_end:
1327 msgh->msg_controllen = space;
1328 return 0;
1329 }
1330
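/*
 * Reverse direction: translate host control messages back into the guest
 * msghdr.  SCM_RIGHTS and SO_TIMESTAMP payloads are converted; other types
 * are copied verbatim with a warning.
 */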
1331 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1332 struct msghdr *msgh)
1333 {
1334 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1335 abi_long msg_controllen;
1336 abi_ulong target_cmsg_addr;
1337 struct target_cmsghdr *target_cmsg;
1338 socklen_t space = 0;
1339
1340 msg_controllen = tswapal(target_msgh->msg_controllen);
1341 if (msg_controllen < sizeof (struct target_cmsghdr))
1342 goto the_end;
1343 target_cmsg_addr = tswapal(target_msgh->msg_control);
1344 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1345 if (!target_cmsg)
1346 return -TARGET_EFAULT;
1347
1348 while (cmsg && target_cmsg) {
1349 void *data = CMSG_DATA(cmsg);
1350 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1351
1352 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1353
1354 space += TARGET_CMSG_SPACE(len);
1355 if (space > msg_controllen) {
1356 space -= TARGET_CMSG_SPACE(len);
1357 gemu_log("Target cmsg overflow\n");
1358 break;
1359 }
1360
1361 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1362 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1363 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1364
1365 if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1366 (cmsg->cmsg_type == SCM_RIGHTS)) {
1367 int *fd = (int *)data;
1368 int *target_fd = (int *)target_data;
1369 int i, numfds = len / sizeof(int);
1370
1371 for (i = 0; i < numfds; i++)
1372 target_fd[i] = tswap32(fd[i]);
1373 } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1374 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1375 (len == sizeof(struct timeval))) {
1376 /* copy struct timeval to target */
1377 struct timeval *tv = (struct timeval *)data;
1378 struct target_timeval *target_tv =
1379 (struct target_timeval *)target_data;
1380
1381 target_tv->tv_sec = tswapal(tv->tv_sec);
1382 target_tv->tv_usec = tswapal(tv->tv_usec);
1383 } else {
1384 gemu_log("Unsupported ancillary data: %d/%d\n",
1385 cmsg->cmsg_level, cmsg->cmsg_type);
1386 memcpy(target_data, data, len);
1387 }
1388
1389 cmsg = CMSG_NXTHDR(msgh, cmsg);
1390 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1391 }
1392 unlock_user(target_cmsg, target_cmsg_addr, space);
1393 the_end:
1394 target_msgh->msg_controllen = tswapal(space);
1395 return 0;
1396 }
1397
1398 /* do_setsockopt() Must return target values and target errnos. */
1399 static abi_long do_setsockopt(int sockfd, int level, int optname,
1400 abi_ulong optval_addr, socklen_t optlen)
1401 {
1402 abi_long ret;
1403 int val;
1404 struct ip_mreqn *ip_mreq;
1405 struct ip_mreq_source *ip_mreq_source;
1406
1407 switch(level) {
1408 case SOL_TCP:
1409 /* TCP options all take an 'int' value. */
1410 if (optlen < sizeof(uint32_t))
1411 return -TARGET_EINVAL;
1412
1413 if (get_user_u32(val, optval_addr))
1414 return -TARGET_EFAULT;
1415 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1416 break;
1417 case SOL_IP:
1418 switch(optname) {
1419 case IP_TOS:
1420 case IP_TTL:
1421 case IP_HDRINCL:
1422 case IP_ROUTER_ALERT:
1423 case IP_RECVOPTS:
1424 case IP_RETOPTS:
1425 case IP_PKTINFO:
1426 case IP_MTU_DISCOVER:
1427 case IP_RECVERR:
1428 case IP_RECVTOS:
1429 #ifdef IP_FREEBIND
1430 case IP_FREEBIND:
1431 #endif
1432 case IP_MULTICAST_TTL:
1433 case IP_MULTICAST_LOOP:
1434 val = 0;
1435 if (optlen >= sizeof(uint32_t)) {
1436 if (get_user_u32(val, optval_addr))
1437 return -TARGET_EFAULT;
1438 } else if (optlen >= 1) {
1439 if (get_user_u8(val, optval_addr))
1440 return -TARGET_EFAULT;
1441 }
1442 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1443 break;
1444 case IP_ADD_MEMBERSHIP:
1445 case IP_DROP_MEMBERSHIP:
1446 if (optlen < sizeof (struct target_ip_mreq) ||
1447 optlen > sizeof (struct target_ip_mreqn))
1448 return -TARGET_EINVAL;
1449
1450 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1451 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1452 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1453 break;
1454
1455 case IP_BLOCK_SOURCE:
1456 case IP_UNBLOCK_SOURCE:
1457 case IP_ADD_SOURCE_MEMBERSHIP:
1458 case IP_DROP_SOURCE_MEMBERSHIP:
1459 if (optlen != sizeof (struct target_ip_mreq_source))
1460 return -TARGET_EINVAL;
1461
1462 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1463 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1464 unlock_user (ip_mreq_source, optval_addr, 0);
1465 break;
1466
1467 default:
1468 goto unimplemented;
1469 }
1470 break;
1471 case SOL_RAW:
1472 switch (optname) {
1473 case ICMP_FILTER:
1474 /* struct icmp_filter takes a u32 value */
1475 if (optlen < sizeof(uint32_t)) {
1476 return -TARGET_EINVAL;
1477 }
1478
1479 if (get_user_u32(val, optval_addr)) {
1480 return -TARGET_EFAULT;
1481 }
1482 ret = get_errno(setsockopt(sockfd, level, optname,
1483 &val, sizeof(val)));
1484 break;
1485
1486 default:
1487 goto unimplemented;
1488 }
1489 break;
1490 case TARGET_SOL_SOCKET:
1491 switch (optname) {
1492 /* Options with 'int' argument. */
1493 case TARGET_SO_DEBUG:
1494 optname = SO_DEBUG;
1495 break;
1496 case TARGET_SO_REUSEADDR:
1497 optname = SO_REUSEADDR;
1498 break;
1499 case TARGET_SO_TYPE:
1500 optname = SO_TYPE;
1501 break;
1502 case TARGET_SO_ERROR:
1503 optname = SO_ERROR;
1504 break;
1505 case TARGET_SO_DONTROUTE:
1506 optname = SO_DONTROUTE;
1507 break;
1508 case TARGET_SO_BROADCAST:
1509 optname = SO_BROADCAST;
1510 break;
1511 case TARGET_SO_SNDBUF:
1512 optname = SO_SNDBUF;
1513 break;
1514 case TARGET_SO_RCVBUF:
1515 optname = SO_RCVBUF;
1516 break;
1517 case TARGET_SO_KEEPALIVE:
1518 optname = SO_KEEPALIVE;
1519 break;
1520 case TARGET_SO_OOBINLINE:
1521 optname = SO_OOBINLINE;
1522 break;
1523 case TARGET_SO_NO_CHECK:
1524 optname = SO_NO_CHECK;
1525 break;
1526 case TARGET_SO_PRIORITY:
1527 optname = SO_PRIORITY;
1528 break;
1529 #ifdef SO_BSDCOMPAT
1530 case TARGET_SO_BSDCOMPAT:
1531 optname = SO_BSDCOMPAT;
1532 break;
1533 #endif
1534 case TARGET_SO_PASSCRED:
1535 optname = SO_PASSCRED;
1536 break;
1537 case TARGET_SO_TIMESTAMP:
1538 optname = SO_TIMESTAMP;
1539 break;
1540 case TARGET_SO_RCVLOWAT:
1541 optname = SO_RCVLOWAT;
1542 break;
1543 case TARGET_SO_RCVTIMEO:
1544 optname = SO_RCVTIMEO;
1545 break;
1546 case TARGET_SO_SNDTIMEO:
1547 optname = SO_SNDTIMEO;
1548 break;
1550 default:
1551 goto unimplemented;
1552 }
1553 if (optlen < sizeof(uint32_t))
1554 return -TARGET_EINVAL;
1555
1556 if (get_user_u32(val, optval_addr))
1557 return -TARGET_EFAULT;
1558 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1559 break;
1560 default:
1561 unimplemented:
1562 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1563 ret = -TARGET_ENOPROTOOPT;
1564 }
1565 return ret;
1566 }
1567
1568 /* do_getsockopt() Must return target values and target errnos. */
1569 static abi_long do_getsockopt(int sockfd, int level, int optname,
1570 abi_ulong optval_addr, abi_ulong optlen)
1571 {
1572 abi_long ret;
1573 int len, val;
1574 socklen_t lv;
1575
1576 switch(level) {
1577 case TARGET_SOL_SOCKET:
1578 level = SOL_SOCKET;
1579 switch (optname) {
1580 /* These don't just return a single integer */
1581 case TARGET_SO_LINGER:
1582 case TARGET_SO_RCVTIMEO:
1583 case TARGET_SO_SNDTIMEO:
1584 case TARGET_SO_PEERNAME:
1585 goto unimplemented;
1586 case TARGET_SO_PEERCRED: {
1587 struct ucred cr;
1588 socklen_t crlen;
1589 struct target_ucred *tcr;
1590
1591 if (get_user_u32(len, optlen)) {
1592 return -TARGET_EFAULT;
1593 }
1594 if (len < 0) {
1595 return -TARGET_EINVAL;
1596 }
1597
1598 crlen = sizeof(cr);
1599 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1600 &cr, &crlen));
1601 if (ret < 0) {
1602 return ret;
1603 }
1604 if (len > crlen) {
1605 len = crlen;
1606 }
1607 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1608 return -TARGET_EFAULT;
1609 }
1610 __put_user(cr.pid, &tcr->pid);
1611 __put_user(cr.uid, &tcr->uid);
1612 __put_user(cr.gid, &tcr->gid);
1613 unlock_user_struct(tcr, optval_addr, 1);
1614 if (put_user_u32(len, optlen)) {
1615 return -TARGET_EFAULT;
1616 }
1617 break;
1618 }
1619 /* Options with 'int' argument. */
1620 case TARGET_SO_DEBUG:
1621 optname = SO_DEBUG;
1622 goto int_case;
1623 case TARGET_SO_REUSEADDR:
1624 optname = SO_REUSEADDR;
1625 goto int_case;
1626 case TARGET_SO_TYPE:
1627 optname = SO_TYPE;
1628 goto int_case;
1629 case TARGET_SO_ERROR:
1630 optname = SO_ERROR;
1631 goto int_case;
1632 case TARGET_SO_DONTROUTE:
1633 optname = SO_DONTROUTE;
1634 goto int_case;
1635 case TARGET_SO_BROADCAST:
1636 optname = SO_BROADCAST;
1637 goto int_case;
1638 case TARGET_SO_SNDBUF:
1639 optname = SO_SNDBUF;
1640 goto int_case;
1641 case TARGET_SO_RCVBUF:
1642 optname = SO_RCVBUF;
1643 goto int_case;
1644 case TARGET_SO_KEEPALIVE:
1645 optname = SO_KEEPALIVE;
1646 goto int_case;
1647 case TARGET_SO_OOBINLINE:
1648 optname = SO_OOBINLINE;
1649 goto int_case;
1650 case TARGET_SO_NO_CHECK:
1651 optname = SO_NO_CHECK;
1652 goto int_case;
1653 case TARGET_SO_PRIORITY:
1654 optname = SO_PRIORITY;
1655 goto int_case;
1656 #ifdef SO_BSDCOMPAT
1657 case TARGET_SO_BSDCOMPAT:
1658 optname = SO_BSDCOMPAT;
1659 goto int_case;
1660 #endif
1661 case TARGET_SO_PASSCRED:
1662 optname = SO_PASSCRED;
1663 goto int_case;
1664 case TARGET_SO_TIMESTAMP:
1665 optname = SO_TIMESTAMP;
1666 goto int_case;
1667 case TARGET_SO_RCVLOWAT:
1668 optname = SO_RCVLOWAT;
1669 goto int_case;
1670 default:
1671 goto int_case;
1672 }
1673 break;
1674 case SOL_TCP:
1675 /* TCP options all take an 'int' value. */
1676 int_case:
1677 if (get_user_u32(len, optlen))
1678 return -TARGET_EFAULT;
1679 if (len < 0)
1680 return -TARGET_EINVAL;
1681 lv = sizeof(lv);
1682 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1683 if (ret < 0)
1684 return ret;
1685 if (len > lv)
1686 len = lv;
1687 if (len == 4) {
1688 if (put_user_u32(val, optval_addr))
1689 return -TARGET_EFAULT;
1690 } else {
1691 if (put_user_u8(val, optval_addr))
1692 return -TARGET_EFAULT;
1693 }
1694 if (put_user_u32(len, optlen))
1695 return -TARGET_EFAULT;
1696 break;
1697 case SOL_IP:
1698 switch(optname) {
1699 case IP_TOS:
1700 case IP_TTL:
1701 case IP_HDRINCL:
1702 case IP_ROUTER_ALERT:
1703 case IP_RECVOPTS:
1704 case IP_RETOPTS:
1705 case IP_PKTINFO:
1706 case IP_MTU_DISCOVER:
1707 case IP_RECVERR:
1708 case IP_RECVTOS:
1709 #ifdef IP_FREEBIND
1710 case IP_FREEBIND:
1711 #endif
1712 case IP_MULTICAST_TTL:
1713 case IP_MULTICAST_LOOP:
1714 if (get_user_u32(len, optlen))
1715 return -TARGET_EFAULT;
1716 if (len < 0)
1717 return -TARGET_EINVAL;
1718 lv = sizeof(lv);
1719 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1720 if (ret < 0)
1721 return ret;
1722 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1723 len = 1;
1724 if (put_user_u32(len, optlen)
1725 || put_user_u8(val, optval_addr))
1726 return -TARGET_EFAULT;
1727 } else {
1728 if (len > sizeof(int))
1729 len = sizeof(int);
1730 if (put_user_u32(len, optlen)
1731 || put_user_u32(val, optval_addr))
1732 return -TARGET_EFAULT;
1733 }
1734 break;
1735 default:
1736 ret = -TARGET_ENOPROTOOPT;
1737 break;
1738 }
1739 break;
1740 default:
1741 unimplemented:
1742 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1743 level, optname);
1744 ret = -TARGET_EOPNOTSUPP;
1745 break;
1746 }
1747 return ret;
1748 }
1749
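/*
 * Build a host iovec array from the guest iovec array at target_addr,
 * locking each buffer into host memory.  On failure NULL is returned with
 * errno set (a zero count also yields NULL, with errno cleared).
 * unlock_iovec() releases the buffers again.
 */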
1750 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1751 int count, int copy)
1752 {
1753 struct target_iovec *target_vec;
1754 struct iovec *vec;
1755 abi_ulong total_len, max_len;
1756 int i;
1757
1758 if (count == 0) {
1759 errno = 0;
1760 return NULL;
1761 }
1762 if (count > IOV_MAX) {
1763 errno = EINVAL;
1764 return NULL;
1765 }
1766
1767 vec = calloc(count, sizeof(struct iovec));
1768 if (vec == NULL) {
1769 errno = ENOMEM;
1770 return NULL;
1771 }
1772
1773 target_vec = lock_user(VERIFY_READ, target_addr,
1774 count * sizeof(struct target_iovec), 1);
1775 if (target_vec == NULL) {
1776 errno = EFAULT;
1777 goto fail2;
1778 }
1779
1780 /* ??? If host page size > target page size, this will result in a
1781 value larger than what we can actually support. */
1782 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1783 total_len = 0;
1784
1785 for (i = 0; i < count; i++) {
1786 abi_ulong base = tswapal(target_vec[i].iov_base);
1787 abi_long len = tswapal(target_vec[i].iov_len);
1788
1789 if (len < 0) {
1790 errno = EINVAL;
1791 goto fail;
1792 } else if (len == 0) {
1793 /* Zero length pointer is ignored. */
1794 vec[i].iov_base = 0;
1795 } else {
1796 vec[i].iov_base = lock_user(type, base, len, copy);
1797 if (!vec[i].iov_base) {
1798 errno = EFAULT;
1799 goto fail;
1800 }
1801 if (len > max_len - total_len) {
1802 len = max_len - total_len;
1803 }
1804 }
1805 vec[i].iov_len = len;
1806 total_len += len;
1807 }
1808
1809 unlock_user(target_vec, target_addr, 0);
1810 return vec;
1811
1812 fail:
1813 free(vec);
1814 fail2:
1815 unlock_user(target_vec, target_addr, 0);
1816 return NULL;
1817 }
1818
1819 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1820 int count, int copy)
1821 {
1822 struct target_iovec *target_vec;
1823 int i;
1824
1825 target_vec = lock_user(VERIFY_READ, target_addr,
1826 count * sizeof(struct target_iovec), 1);
1827 if (target_vec) {
1828 for (i = 0; i < count; i++) {
1829 abi_ulong base = tswapal(target_vec[i].iov_base);
1830 abi_long len = tswapal(target_vec[i].iov_len);
1831 if (len < 0) {
1832 break;
1833 }
1834 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1835 }
1836 unlock_user(target_vec, target_addr, 0);
1837 }
1838
1839 free(vec);
1840 }
1841
1842 /* do_socket() Must return target values and target errnos. */
1843 static abi_long do_socket(int domain, int type, int protocol)
1844 {
1845 #if defined(TARGET_MIPS)
1846 switch(type) {
1847 case TARGET_SOCK_DGRAM:
1848 type = SOCK_DGRAM;
1849 break;
1850 case TARGET_SOCK_STREAM:
1851 type = SOCK_STREAM;
1852 break;
1853 case TARGET_SOCK_RAW:
1854 type = SOCK_RAW;
1855 break;
1856 case TARGET_SOCK_RDM:
1857 type = SOCK_RDM;
1858 break;
1859 case TARGET_SOCK_SEQPACKET:
1860 type = SOCK_SEQPACKET;
1861 break;
1862 case TARGET_SOCK_PACKET:
1863 type = SOCK_PACKET;
1864 break;
1865 }
1866 #endif
1867 if (domain == PF_NETLINK)
1868 return -TARGET_EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1869 return get_errno(socket(domain, type, protocol));
1870 }
1871
1872 /* do_bind() Must return target values and target errnos. */
1873 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1874 socklen_t addrlen)
1875 {
1876 void *addr;
1877 abi_long ret;
1878
1879 if ((int)addrlen < 0) {
1880 return -TARGET_EINVAL;
1881 }
1882
1883 addr = alloca(addrlen+1);
1884
1885 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1886 if (ret)
1887 return ret;
1888
1889 return get_errno(bind(sockfd, addr, addrlen));
1890 }
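/*
 * Editor's sketch (not part of the original source): a typical guest bind(2).
 * Port and address are already in network byte order, so
 * target_to_host_sockaddr() mostly has to copy the structure and byte-swap
 * the sa_family field for the host.  example_* is hypothetical; not compiled.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>

static int example_bind(int fd)
{
    struct sockaddr_in sa;

    memset(&sa, 0, sizeof(sa));
    sa.sin_family = AF_INET;
    sa.sin_port = htons(7777);
    sa.sin_addr.s_addr = htonl(INADDR_ANY);
    return bind(fd, (struct sockaddr *)&sa, sizeof(sa));
}
#endif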
1891
1892 /* do_connect() Must return target values and target errnos. */
1893 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1894 socklen_t addrlen)
1895 {
1896 void *addr;
1897 abi_long ret;
1898
1899 if ((int)addrlen < 0) {
1900 return -TARGET_EINVAL;
1901 }
1902
1903 addr = alloca(addrlen);
1904
1905 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1906 if (ret)
1907 return ret;
1908
1909 return get_errno(connect(sockfd, addr, addrlen));
1910 }
1911
1912 /* do_sendrecvmsg() Must return target values and target errnos. */
1913 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1914 int flags, int send)
1915 {
1916 abi_long ret, len;
1917 struct target_msghdr *msgp;
1918 struct msghdr msg;
1919 int count;
1920 struct iovec *vec;
1921 abi_ulong target_vec;
1922
1923 /* FIXME */
1924 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1925 msgp,
1926 target_msg,
1927 send ? 1 : 0))
1928 return -TARGET_EFAULT;
1929 if (msgp->msg_name) {
1930 msg.msg_namelen = tswap32(msgp->msg_namelen);
1931 msg.msg_name = alloca(msg.msg_namelen);
1932 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1933 msg.msg_namelen);
1934 if (ret) {
1935 goto out2;
1936 }
1937 } else {
1938 msg.msg_name = NULL;
1939 msg.msg_namelen = 0;
1940 }
1941 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1942 msg.msg_control = alloca(msg.msg_controllen);
1943 msg.msg_flags = tswap32(msgp->msg_flags);
1944
1945 count = tswapal(msgp->msg_iovlen);
1946 target_vec = tswapal(msgp->msg_iov);
1947 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1948 target_vec, count, send);
1949 if (vec == NULL) {
1950 ret = -host_to_target_errno(errno);
1951 goto out2;
1952 }
1953 msg.msg_iovlen = count;
1954 msg.msg_iov = vec;
1955
1956 if (send) {
1957 ret = target_to_host_cmsg(&msg, msgp);
1958 if (ret == 0)
1959 ret = get_errno(sendmsg(fd, &msg, flags));
1960 } else {
1961 ret = get_errno(recvmsg(fd, &msg, flags));
1962 if (!is_error(ret)) {
1963 len = ret;
1964 ret = host_to_target_cmsg(msgp, &msg);
1965 if (!is_error(ret)) {
1966 msgp->msg_namelen = tswap32(msg.msg_namelen);
1967 if (msg.msg_name != NULL) {
1968 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1969 msg.msg_name, msg.msg_namelen);
1970 if (ret) {
1971 goto out;
1972 }
1973 }
1974
1975 ret = len;
1976 }
1977 }
1978 }
1979
1980 out:
1981 unlock_iovec(vec, target_vec, count, !send);
1982 out2:
1983 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1984 return ret;
1985 }
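/*
 * Editor's sketch (not part of the original source): a guest sendmsg(2) that
 * passes a file descriptor as SCM_RIGHTS ancillary data.  The control buffer
 * layout (cmsghdr length and alignment) is ABI dependent, which is why
 * target_to_host_cmsg()/host_to_target_cmsg() are needed on top of the iovec
 * translation.  example_* names are hypothetical; not compiled.
 */
#if 0
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static int example_send_fd(int sock, int fd_to_pass)
{
    char dummy = 'x';
    struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
    union {
        char buf[CMSG_SPACE(sizeof(int))];
        struct cmsghdr align;
    } u;
    struct msghdr msg;
    struct cmsghdr *cmsg;

    memset(&msg, 0, sizeof(msg));
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = u.buf;
    msg.msg_controllen = sizeof(u.buf);

    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

    return sendmsg(sock, &msg, 0);
}
#endif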
1986
1987 /* do_accept() Must return target values and target errnos. */
1988 static abi_long do_accept(int fd, abi_ulong target_addr,
1989 abi_ulong target_addrlen_addr)
1990 {
1991 socklen_t addrlen;
1992 void *addr;
1993 abi_long ret;
1994
1995 if (target_addr == 0)
1996 return get_errno(accept(fd, NULL, NULL));
1997
1998 /* linux returns EINVAL if addrlen pointer is invalid */
1999 if (get_user_u32(addrlen, target_addrlen_addr))
2000 return -TARGET_EINVAL;
2001
2002 if ((int)addrlen < 0) {
2003 return -TARGET_EINVAL;
2004 }
2005
2006 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2007 return -TARGET_EINVAL;
2008
2009 addr = alloca(addrlen);
2010
2011 ret = get_errno(accept(fd, addr, &addrlen));
2012 if (!is_error(ret)) {
2013 host_to_target_sockaddr(target_addr, addr, addrlen);
2014 if (put_user_u32(addrlen, target_addrlen_addr))
2015 ret = -TARGET_EFAULT;
2016 }
2017 return ret;
2018 }
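/*
 * Editor's note (not part of the original source): accept(2), getpeername(2)
 * and getsockname(2) all use a value-result length, which is why the wrappers
 * in this area read the guest's addrlen with get_user_u32() first and write
 * the updated value back with put_user_u32() on success.  Illustrative guest
 * code below; example_* is hypothetical and the block is not compiled.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>

static int example_accept(int listen_fd)
{
    struct sockaddr_in peer;
    socklen_t len = sizeof(peer);   /* in: buffer size, out: address size */

    return accept(listen_fd, (struct sockaddr *)&peer, &len);
}
#endif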
2019
2020 /* do_getpeername() Must return target values and target errnos. */
2021 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2022 abi_ulong target_addrlen_addr)
2023 {
2024 socklen_t addrlen;
2025 void *addr;
2026 abi_long ret;
2027
2028 if (get_user_u32(addrlen, target_addrlen_addr))
2029 return -TARGET_EFAULT;
2030
2031 if ((int)addrlen < 0) {
2032 return -TARGET_EINVAL;
2033 }
2034
2035 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2036 return -TARGET_EFAULT;
2037
2038 addr = alloca(addrlen);
2039
2040 ret = get_errno(getpeername(fd, addr, &addrlen));
2041 if (!is_error(ret)) {
2042 host_to_target_sockaddr(target_addr, addr, addrlen);
2043 if (put_user_u32(addrlen, target_addrlen_addr))
2044 ret = -TARGET_EFAULT;
2045 }
2046 return ret;
2047 }
2048
2049 /* do_getsockname() Must return target values and target errnos. */
2050 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2051 abi_ulong target_addrlen_addr)
2052 {
2053 socklen_t addrlen;
2054 void *addr;
2055 abi_long ret;
2056
2057 if (get_user_u32(addrlen, target_addrlen_addr))
2058 return -TARGET_EFAULT;
2059
2060 if ((int)addrlen < 0) {
2061 return -TARGET_EINVAL;
2062 }
2063
2064 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2065 return -TARGET_EFAULT;
2066
2067 addr = alloca(addrlen);
2068
2069 ret = get_errno(getsockname(fd, addr, &addrlen));
2070 if (!is_error(ret)) {
2071 host_to_target_sockaddr(target_addr, addr, addrlen);
2072 if (put_user_u32(addrlen, target_addrlen_addr))
2073 ret = -TARGET_EFAULT;
2074 }
2075 return ret;
2076 }
2077
2078 /* do_socketpair() Must return target values and target errnos. */
2079 static abi_long do_socketpair(int domain, int type, int protocol,
2080 abi_ulong target_tab_addr)
2081 {
2082 int tab[2];
2083 abi_long ret;
2084
2085 ret = get_errno(socketpair(domain, type, protocol, tab));
2086 if (!is_error(ret)) {
2087 if (put_user_s32(tab[0], target_tab_addr)
2088 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2089 ret = -TARGET_EFAULT;
2090 }
2091 return ret;
2092 }
2093
2094 /* do_sendto() Must return target values and target errnos. */
2095 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2096 abi_ulong target_addr, socklen_t addrlen)
2097 {
2098 void *addr;
2099 void *host_msg;
2100 abi_long ret;
2101
2102 if ((int)addrlen < 0) {
2103 return -TARGET_EINVAL;
2104 }
2105
2106 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2107 if (!host_msg)
2108 return -TARGET_EFAULT;
2109 if (target_addr) {
2110 addr = alloca(addrlen);
2111 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2112 if (ret) {
2113 unlock_user(host_msg, msg, 0);
2114 return ret;
2115 }
2116 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2117 } else {
2118 ret = get_errno(send(fd, host_msg, len, flags));
2119 }
2120 unlock_user(host_msg, msg, 0);
2121 return ret;
2122 }
2123
2124 /* do_recvfrom() Must return target values and target errnos. */
2125 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2126 abi_ulong target_addr,
2127 abi_ulong target_addrlen)
2128 {
2129 socklen_t addrlen;
2130 void *addr;
2131 void *host_msg;
2132 abi_long ret;
2133
2134 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2135 if (!host_msg)
2136 return -TARGET_EFAULT;
2137 if (target_addr) {
2138 if (get_user_u32(addrlen, target_addrlen)) {
2139 ret = -TARGET_EFAULT;
2140 goto fail;
2141 }
2142 if ((int)addrlen < 0) {
2143 ret = -TARGET_EINVAL;
2144 goto fail;
2145 }
2146 addr = alloca(addrlen);
2147 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2148 } else {
2149 addr = NULL; /* To keep compiler quiet. */
2150 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2151 }
2152 if (!is_error(ret)) {
2153 if (target_addr) {
2154 host_to_target_sockaddr(target_addr, addr, addrlen);
2155 if (put_user_u32(addrlen, target_addrlen)) {
2156 ret = -TARGET_EFAULT;
2157 goto fail;
2158 }
2159 }
2160 unlock_user(host_msg, msg, len);
2161 } else {
2162 fail:
2163 unlock_user(host_msg, msg, 0);
2164 }
2165 return ret;
2166 }
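/*
 * Editor's sketch (not part of the original source): a guest recvfrom(2).
 * As with accept(2), addrlen is value-result, so do_recvfrom() reads the
 * guest length up front and stores the updated value back only after a
 * successful receive.  example_* is hypothetical; not compiled.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>

static ssize_t example_recvfrom(int udp_fd, char *buf, size_t buflen)
{
    struct sockaddr_in peer;
    socklen_t len = sizeof(peer);

    return recvfrom(udp_fd, buf, buflen, 0, (struct sockaddr *)&peer, &len);
}
#endif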
2167
2168 #ifdef TARGET_NR_socketcall
2169 /* do_socketcall() Must return target values and target errnos. */
2170 static abi_long do_socketcall(int num, abi_ulong vptr)
2171 {
2172 abi_long ret;
2173 const int n = sizeof(abi_ulong);
2174
2175 switch(num) {
2176 case SOCKOP_socket:
2177 {
2178 abi_ulong domain, type, protocol;
2179
2180 if (get_user_ual(domain, vptr)
2181 || get_user_ual(type, vptr + n)
2182 || get_user_ual(protocol, vptr + 2 * n))
2183 return -TARGET_EFAULT;
2184
2185 ret = do_socket(domain, type, protocol);
2186 }
2187 break;
2188 case SOCKOP_bind:
2189 {
2190 abi_ulong sockfd;
2191 abi_ulong target_addr;
2192 socklen_t addrlen;
2193
2194 if (get_user_ual(sockfd, vptr)
2195 || get_user_ual(target_addr, vptr + n)
2196 || get_user_ual(addrlen, vptr + 2 * n))
2197 return -TARGET_EFAULT;
2198
2199 ret = do_bind(sockfd, target_addr, addrlen);
2200 }
2201 break;
2202 case SOCKOP_connect:
2203 {
2204 abi_ulong sockfd;
2205 abi_ulong target_addr;
2206 socklen_t addrlen;
2207
2208 if (get_user_ual(sockfd, vptr)
2209 || get_user_ual(target_addr, vptr + n)
2210 || get_user_ual(addrlen, vptr + 2 * n))
2211 return -TARGET_EFAULT;
2212
2213 ret = do_connect(sockfd, target_addr, addrlen);
2214 }
2215 break;
2216 case SOCKOP_listen:
2217 {
2218 abi_ulong sockfd, backlog;
2219
2220 if (get_user_ual(sockfd, vptr)
2221 || get_user_ual(backlog, vptr + n))
2222 return -TARGET_EFAULT;
2223
2224 ret = get_errno(listen(sockfd, backlog));
2225 }
2226 break;
2227 case SOCKOP_accept:
2228 {
2229 abi_ulong sockfd;
2230 abi_ulong target_addr, target_addrlen;
2231
2232 if (get_user_ual(sockfd, vptr)
2233 || get_user_ual(target_addr, vptr + n)
2234 || get_user_ual(target_addrlen, vptr + 2 * n))
2235 return -TARGET_EFAULT;
2236
2237 ret = do_accept(sockfd, target_addr, target_addrlen);
2238 }
2239 break;
2240 case SOCKOP_getsockname:
2241 {
2242 abi_ulong sockfd;
2243 abi_ulong target_addr, target_addrlen;
2244
2245 if (get_user_ual(sockfd, vptr)
2246 || get_user_ual(target_addr, vptr + n)
2247 || get_user_ual(target_addrlen, vptr + 2 * n))
2248 return -TARGET_EFAULT;
2249
2250 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2251 }
2252 break;
2253 case SOCKOP_getpeername:
2254 {
2255 abi_ulong sockfd;
2256 abi_ulong target_addr, target_addrlen;
2257
2258 if (get_user_ual(sockfd, vptr)
2259 || get_user_ual(target_addr, vptr + n)
2260 || get_user_ual(target_addrlen, vptr + 2 * n))
2261 return -TARGET_EFAULT;
2262
2263 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2264 }
2265 break;
2266 case SOCKOP_socketpair:
2267 {
2268 abi_ulong domain, type, protocol;
2269 abi_ulong tab;
2270
2271 if (get_user_ual(domain, vptr)
2272 || get_user_ual(type, vptr + n)
2273 || get_user_ual(protocol, vptr + 2 * n)
2274 || get_user_ual(tab, vptr + 3 * n))
2275 return -TARGET_EFAULT;
2276
2277 ret = do_socketpair(domain, type, protocol, tab);
2278 }
2279 break;
2280 case SOCKOP_send:
2281 {
2282 abi_ulong sockfd;
2283 abi_ulong msg;
2284 size_t len;
2285 abi_ulong flags;
2286
2287 if (get_user_ual(sockfd, vptr)
2288 || get_user_ual(msg, vptr + n)
2289 || get_user_ual(len, vptr + 2 * n)
2290 || get_user_ual(flags, vptr + 3 * n))
2291 return -TARGET_EFAULT;
2292
2293 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2294 }
2295 break;
2296 case SOCKOP_recv:
2297 {
2298 abi_ulong sockfd;
2299 abi_ulong msg;
2300 size_t len;
2301 abi_ulong flags;
2302
2303 if (get_user_ual(sockfd, vptr)
2304 || get_user_ual(msg, vptr + n)
2305 || get_user_ual(len, vptr + 2 * n)
2306 || get_user_ual(flags, vptr + 3 * n))
2307 return -TARGET_EFAULT;
2308
2309 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2310 }
2311 break;
2312 case SOCKOP_sendto:
2313 {
2314 abi_ulong sockfd;
2315 abi_ulong msg;
2316 size_t len;
2317 abi_ulong flags;
2318 abi_ulong addr;
2319 socklen_t addrlen;
2320
2321 if (get_user_ual(sockfd, vptr)
2322 || get_user_ual(msg, vptr + n)
2323 || get_user_ual(len, vptr + 2 * n)
2324 || get_user_ual(flags, vptr + 3 * n)
2325 || get_user_ual(addr, vptr + 4 * n)
2326 || get_user_ual(addrlen, vptr + 5 * n))
2327 return -TARGET_EFAULT;
2328
2329 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2330 }
2331 break;
2332 case SOCKOP_recvfrom:
2333 {
2334 abi_ulong sockfd;
2335 abi_ulong msg;
2336 size_t len;
2337 abi_ulong flags;
2338 abi_ulong addr;
2339 socklen_t addrlen;
2340
2341 if (get_user_ual(sockfd, vptr)
2342 || get_user_ual(msg, vptr + n)
2343 || get_user_ual(len, vptr + 2 * n)
2344 || get_user_ual(flags, vptr + 3 * n)
2345 || get_user_ual(addr, vptr + 4 * n)
2346 || get_user_ual(addrlen, vptr + 5 * n))
2347 return -TARGET_EFAULT;
2348
2349 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2350 }
2351 break;
2352 case SOCKOP_shutdown:
2353 {
2354 abi_ulong sockfd, how;
2355
2356 if (get_user_ual(sockfd, vptr)
2357 || get_user_ual(how, vptr + n))
2358 return -TARGET_EFAULT;
2359
2360 ret = get_errno(shutdown(sockfd, how));
2361 }
2362 break;
2363 case SOCKOP_sendmsg:
2364 case SOCKOP_recvmsg:
2365 {
2366 abi_ulong fd;
2367 abi_ulong target_msg;
2368 abi_ulong flags;
2369
2370 if (get_user_ual(fd, vptr)
2371 || get_user_ual(target_msg, vptr + n)
2372 || get_user_ual(flags, vptr + 2 * n))
2373 return -TARGET_EFAULT;
2374
2375 ret = do_sendrecvmsg(fd, target_msg, flags,
2376 (num == SOCKOP_sendmsg));
2377 }
2378 break;
2379 case SOCKOP_setsockopt:
2380 {
2381 abi_ulong sockfd;
2382 abi_ulong level;
2383 abi_ulong optname;
2384 abi_ulong optval;
2385 socklen_t optlen;
2386
2387 if (get_user_ual(sockfd, vptr)
2388 || get_user_ual(level, vptr + n)
2389 || get_user_ual(optname, vptr + 2 * n)
2390 || get_user_ual(optval, vptr + 3 * n)
2391 || get_user_ual(optlen, vptr + 4 * n))
2392 return -TARGET_EFAULT;
2393
2394 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2395 }
2396 break;
2397 case SOCKOP_getsockopt:
2398 {
2399 abi_ulong sockfd;
2400 abi_ulong level;
2401 abi_ulong optname;
2402 abi_ulong optval;
2403 socklen_t optlen;
2404
2405 if (get_user_ual(sockfd, vptr)
2406 || get_user_ual(level, vptr + n)
2407 || get_user_ual(optname, vptr + 2 * n)
2408 || get_user_ual(optval, vptr + 3 * n)
2409 || get_user_ual(optlen, vptr + 4 * n))
2410 return -TARGET_EFAULT;
2411
2412 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2413 }
2414 break;
2415 default:
2416 gemu_log("Unsupported socketcall: %d\n", num);
2417 ret = -TARGET_ENOSYS;
2418 break;
2419 }
2420 return ret;
2421 }
2422 #endif
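/*
 * Editor's sketch (not part of the original source): on targets that only
 * provide sys_socketcall, the guest libc packs the real arguments into an
 * array of abi_ulong words and passes a pointer to it -- exactly what the
 * get_user_ual(..., vptr + i * n) calls above unpack.  SYS_socketcall exists
 * only on such targets, and EXAMPLE_SOCKOP_connect is a local stand-in for
 * the kernel's connect opcode (3).  Not compiled.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>

#define EXAMPLE_SOCKOP_connect 3

static long example_connect_via_socketcall(int fd, void *sa, unsigned long salen)
{
    unsigned long args[3] = { (unsigned long)fd, (unsigned long)sa, salen };

    /* Roughly what the guest libc's connect() wrapper does internally. */
    return syscall(SYS_socketcall, EXAMPLE_SOCKOP_connect, args);
}
#endif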
2423
2424 #define N_SHM_REGIONS 32
2425
2426 static struct shm_region {
2427 abi_ulong start;
2428 abi_ulong size;
2429 } shm_regions[N_SHM_REGIONS];
2430
2431 struct target_ipc_perm
2432 {
2433 abi_long __key;
2434 abi_ulong uid;
2435 abi_ulong gid;
2436 abi_ulong cuid;
2437 abi_ulong cgid;
2438 unsigned short int mode;
2439 unsigned short int __pad1;
2440 unsigned short int __seq;
2441 unsigned short int __pad2;
2442 abi_ulong __unused1;
2443 abi_ulong __unused2;
2444 };
2445
2446 struct target_semid_ds
2447 {
2448 struct target_ipc_perm sem_perm;
2449 abi_ulong sem_otime;
2450 abi_ulong __unused1;
2451 abi_ulong sem_ctime;
2452 abi_ulong __unused2;
2453 abi_ulong sem_nsems;
2454 abi_ulong __unused3;
2455 abi_ulong __unused4;
2456 };
2457
2458 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2459 abi_ulong target_addr)
2460 {
2461 struct target_ipc_perm *target_ip;
2462 struct target_semid_ds *target_sd;
2463
2464 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2465 return -TARGET_EFAULT;
2466 target_ip = &(target_sd->sem_perm);
2467 host_ip->__key = tswapal(target_ip->__key);
2468 host_ip->uid = tswapal(target_ip->uid);
2469 host_ip->gid = tswapal(target_ip->gid);
2470 host_ip->cuid = tswapal(target_ip->cuid);
2471 host_ip->cgid = tswapal(target_ip->cgid);
2472 host_ip->mode = tswap16(target_ip->mode);
2473 unlock_user_struct(target_sd, target_addr, 0);
2474 return 0;
2475 }
2476
2477 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2478 struct ipc_perm *host_ip)
2479 {
2480 struct target_ipc_perm *target_ip;
2481 struct target_semid_ds *target_sd;
2482
2483 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2484 return -TARGET_EFAULT;
2485 target_ip = &(target_sd->sem_perm);
2486 target_ip->__key = tswapal(host_ip->__key);
2487 target_ip->uid = tswapal(host_ip->uid);
2488 target_ip->gid = tswapal(host_ip->gid);
2489 target_ip->cuid = tswapal(host_ip->cuid);
2490 target_ip->cgid = tswapal(host_ip->cgid);
2491 target_ip->mode = tswap16(host_ip->mode);
2492 unlock_user_struct(target_sd, target_addr, 1);
2493 return 0;
2494 }
2495
2496 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2497 abi_ulong target_addr)
2498 {
2499 struct target_semid_ds *target_sd;
2500
2501 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2502 return -TARGET_EFAULT;
2503 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2504 return -TARGET_EFAULT;
2505 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2506 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2507 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2508 unlock_user_struct(target_sd, target_addr, 0);
2509 return 0;
2510 }
2511
2512 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2513 struct semid_ds *host_sd)
2514 {
2515 struct target_semid_ds *target_sd;
2516
2517 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2518 return -TARGET_EFAULT;
2519 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2520 return -TARGET_EFAULT;
2521 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2522 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2523 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2524 unlock_user_struct(target_sd, target_addr, 1);
2525 return 0;
2526 }
2527
2528 struct target_seminfo {
2529 int semmap;
2530 int semmni;
2531 int semmns;
2532 int semmnu;
2533 int semmsl;
2534 int semopm;
2535 int semume;
2536 int semusz;
2537 int semvmx;
2538 int semaem;
2539 };
2540
2541 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2542 struct seminfo *host_seminfo)
2543 {
2544 struct target_seminfo *target_seminfo;
2545 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2546 return -TARGET_EFAULT;
2547 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2548 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2549 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2550 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2551 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2552 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2553 __put_user(host_seminfo->semume, &target_seminfo->semume);
2554 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2555 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2556 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2557 unlock_user_struct(target_seminfo, target_addr, 1);
2558 return 0;
2559 }
2560
2561 union semun {
2562 int val;
2563 struct semid_ds *buf;
2564 unsigned short *array;
2565 struct seminfo *__buf;
2566 };
2567
2568 union target_semun {
2569 int val;
2570 abi_ulong buf;
2571 abi_ulong array;
2572 abi_ulong __buf;
2573 };
2574
2575 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2576 abi_ulong target_addr)
2577 {
2578 int nsems;
2579 unsigned short *array;
2580 union semun semun;
2581 struct semid_ds semid_ds;
2582 int i, ret;
2583
2584 semun.buf = &semid_ds;
2585
2586 ret = semctl(semid, 0, IPC_STAT, semun);
2587 if (ret == -1)
2588 return get_errno(ret);
2589
2590 nsems = semid_ds.sem_nsems;
2591
2592 *host_array = malloc(nsems*sizeof(unsigned short));
2593 array = lock_user(VERIFY_READ, target_addr,
2594 nsems*sizeof(unsigned short), 1);
2595 if (!array)
2596 return -TARGET_EFAULT;
2597
2598 for(i=0; i<nsems; i++) {
2599 __get_user((*host_array)[i], &array[i]);
2600 }
2601 unlock_user(array, target_addr, 0);
2602
2603 return 0;
2604 }
2605
2606 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2607 unsigned short **host_array)
2608 {
2609 int nsems;
2610 unsigned short *array;
2611 union semun semun;
2612 struct semid_ds semid_ds;
2613 int i, ret;
2614
2615 semun.buf = &semid_ds;
2616
2617 ret = semctl(semid, 0, IPC_STAT, semun);
2618 if (ret == -1)
2619 return get_errno(ret);
2620
2621 nsems = semid_ds.sem_nsems;
2622
2623 array = lock_user(VERIFY_WRITE, target_addr,
2624 nsems*sizeof(unsigned short), 0);
2625 if (!array)
2626 return -TARGET_EFAULT;
2627
2628 for(i=0; i<nsems; i++) {
2629 __put_user((*host_array)[i], &array[i]);
2630 }
2631 free(*host_array);
2632 unlock_user(array, target_addr, 1);
2633
2634 return 0;
2635 }
2636
2637 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2638 union target_semun target_su)
2639 {
2640 union semun arg;
2641 struct semid_ds dsarg;
2642 unsigned short *array = NULL;
2643 struct seminfo seminfo;
2644 abi_long ret = -TARGET_EINVAL;
2645 abi_long err;
2646 cmd &= 0xff;
2647
2648 switch( cmd ) {
2649 case GETVAL:
2650 case SETVAL:
2651 arg.val = tswap32(target_su.val);
2652 ret = get_errno(semctl(semid, semnum, cmd, arg));
2653 target_su.val = tswap32(arg.val);
2654 break;
2655 case GETALL:
2656 case SETALL:
2657 err = target_to_host_semarray(semid, &array, target_su.array);
2658 if (err)
2659 return err;
2660 arg.array = array;
2661 ret = get_errno(semctl(semid, semnum, cmd, arg));
2662 err = host_to_target_semarray(semid, target_su.array, &array);
2663 if (err)
2664 return err;
2665 break;
2666 case IPC_STAT:
2667 case IPC_SET:
2668 case SEM_STAT:
2669 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2670 if (err)
2671 return err;
2672 arg.buf = &dsarg;
2673 ret = get_errno(semctl(semid, semnum, cmd, arg));
2674 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2675 if (err)
2676 return err;
2677 break;
2678 case IPC_INFO:
2679 case SEM_INFO:
2680 arg.__buf = &seminfo;
2681 ret = get_errno(semctl(semid, semnum, cmd, arg));
2682 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2683 if (err)
2684 return err;
2685 break;
2686 case IPC_RMID:
2687 case GETPID:
2688 case GETNCNT:
2689 case GETZCNT:
2690 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2691 break;
2692 }
2693
2694 return ret;
2695 }
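/*
 * Editor's sketch (not part of the original source): a guest semctl(2) SETVAL
 * call.  The union is passed by value, which is why do_semctl() has to
 * rebuild a host "union semun" from the target_semun words it was given.
 * Some libcs require the caller to define the union, hence example_semun
 * (a hypothetical name); not compiled.
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union example_semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
};

static int example_sem_init(int semid)
{
    union example_semun arg;

    arg.val = 1;    /* initialise semaphore 0 to "available" */
    return semctl(semid, 0, SETVAL, arg);
}
#endif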
2696
2697 struct target_sembuf {
2698 unsigned short sem_num;
2699 short sem_op;
2700 short sem_flg;
2701 };
2702
2703 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2704 abi_ulong target_addr,
2705 unsigned nsops)
2706 {
2707 struct target_sembuf *target_sembuf;
2708 int i;
2709
2710 target_sembuf = lock_user(VERIFY_READ, target_addr,
2711 nsops*sizeof(struct target_sembuf), 1);
2712 if (!target_sembuf)
2713 return -TARGET_EFAULT;
2714
2715 for(i=0; i<nsops; i++) {
2716 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2717 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2718 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2719 }
2720
2721 unlock_user(target_sembuf, target_addr, 0);
2722
2723 return 0;
2724 }
2725
2726 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2727 {
2728 struct sembuf sops[nsops];
2729
2730 if (target_to_host_sembuf(sops, ptr, nsops))
2731 return -TARGET_EFAULT;
2732
2733 return get_errno(semop(semid, sops, nsops));
2734 }
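/*
 * Editor's sketch (not part of the original source): a guest semop(2) "P"
 * operation.  Each struct sembuf is three shorts, converted field by field by
 * target_to_host_sembuf() above.  example_* is hypothetical; not compiled.
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

static int example_sem_wait(int semid)
{
    struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };

    return semop(semid, &op, 1);
}
#endif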
2735
2736 struct target_msqid_ds
2737 {
2738 struct target_ipc_perm msg_perm;
2739 abi_ulong msg_stime;
2740 #if TARGET_ABI_BITS == 32
2741 abi_ulong __unused1;
2742 #endif
2743 abi_ulong msg_rtime;
2744 #if TARGET_ABI_BITS == 32
2745 abi_ulong __unused2;
2746 #endif
2747 abi_ulong msg_ctime;
2748 #if TARGET_ABI_BITS == 32
2749 abi_ulong __unused3;
2750 #endif
2751 abi_ulong __msg_cbytes;
2752 abi_ulong msg_qnum;
2753 abi_ulong msg_qbytes;
2754 abi_ulong msg_lspid;
2755 abi_ulong msg_lrpid;
2756 abi_ulong __unused4;
2757 abi_ulong __unused5;
2758 };
2759
2760 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2761 abi_ulong target_addr)
2762 {
2763 struct target_msqid_ds *target_md;
2764
2765 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2766 return -TARGET_EFAULT;
2767 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2768 return -TARGET_EFAULT;
2769 host_md->msg_stime = tswapal(target_md->msg_stime);
2770 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2771 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2772 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2773 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2774 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2775 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2776 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2777 unlock_user_struct(target_md, target_addr, 0);
2778 return 0;
2779 }
2780
2781 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2782 struct msqid_ds *host_md)
2783 {
2784 struct target_msqid_ds *target_md;
2785
2786 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2787 return -TARGET_EFAULT;
2788 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2789 return -TARGET_EFAULT;
2790 target_md->msg_stime = tswapal(host_md->msg_stime);
2791 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2792 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2793 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2794 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2795 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2796 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2797 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2798 unlock_user_struct(target_md, target_addr, 1);
2799 return 0;
2800 }
2801
2802 struct target_msginfo {
2803 int msgpool;
2804 int msgmap;
2805 int msgmax;
2806 int msgmnb;
2807 int msgmni;
2808 int msgssz;
2809 int msgtql;
2810 unsigned short int msgseg;
2811 };
2812
2813 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2814 struct msginfo *host_msginfo)
2815 {
2816 struct target_msginfo *target_msginfo;
2817 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2818 return -TARGET_EFAULT;
2819 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2820 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2821 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2822 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2823 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2824 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2825 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2826 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2827 unlock_user_struct(target_msginfo, target_addr, 1);
2828 return 0;
2829 }
2830
2831 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2832 {
2833 struct msqid_ds dsarg;
2834 struct msginfo msginfo;
2835 abi_long ret = -TARGET_EINVAL;
2836
2837 cmd &= 0xff;
2838
2839 switch (cmd) {
2840 case IPC_STAT:
2841 case IPC_SET:
2842 case MSG_STAT:
2843 if (target_to_host_msqid_ds(&dsarg,ptr))
2844 return -TARGET_EFAULT;
2845 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2846 if (host_to_target_msqid_ds(ptr,&dsarg))
2847 return -TARGET_EFAULT;
2848 break;
2849 case IPC_RMID:
2850 ret = get_errno(msgctl(msgid, cmd, NULL));
2851 break;
2852 case IPC_INFO:
2853 case MSG_INFO:
2854 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2855 if (host_to_target_msginfo(ptr, &msginfo))
2856 return -TARGET_EFAULT;
2857 break;
2858 }
2859
2860 return ret;
2861 }
2862
2863 struct target_msgbuf {
2864 abi_long mtype;
2865 char mtext[1];
2866 };
2867
2868 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2869 unsigned int msgsz, int msgflg)
2870 {
2871 struct target_msgbuf *target_mb;
2872 struct msgbuf *host_mb;
2873 abi_long ret = 0;
2874
2875 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2876 return -TARGET_EFAULT;
2877 host_mb = malloc(msgsz+sizeof(long));
2878 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2879 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2880 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2881 free(host_mb);
2882 unlock_user_struct(target_mb, msgp, 0);
2883
2884 return ret;
2885 }
2886
2887 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2888 unsigned int msgsz, abi_long msgtyp,
2889 int msgflg)
2890 {
2891 struct target_msgbuf *target_mb;
2892 char *target_mtext;
2893 struct msgbuf *host_mb;
2894 abi_long ret = 0;
2895
2896 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2897 return -TARGET_EFAULT;
2898
2899 host_mb = g_malloc(msgsz+sizeof(long));
2900 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
2901
2902 if (ret > 0) {
2903 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2904 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2905 if (!target_mtext) {
2906 ret = -TARGET_EFAULT;
2907 goto end;
2908 }
2909 memcpy(target_mb->mtext, host_mb->mtext, ret);
2910 unlock_user(target_mtext, target_mtext_addr, ret);
2911 }
2912
2913 target_mb->mtype = tswapal(host_mb->mtype);
2914
2915 end:
2916 if (target_mb)
2917 unlock_user_struct(target_mb, msgp, 1);
2918 g_free(host_mb);
2919 return ret;
2920 }
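/*
 * Editor's sketch (not part of the original source): guest-side msgsnd(2).
 * The message starts with a long type word followed by the payload, mirroring
 * struct target_msgbuf above; msgsz counts only the payload bytes, which is
 * why do_msgsnd() allocates msgsz + sizeof(long) on the host side.
 * example_* names are hypothetical; not compiled.
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <string.h>

struct example_msg {
    long mtype;
    char mtext[32];
};

static int example_msgsnd(int msqid)
{
    struct example_msg m;

    m.mtype = 1;
    strcpy(m.mtext, "ping");
    return msgsnd(msqid, &m, sizeof(m.mtext), 0);
}
#endif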
2921
2922 struct target_shmid_ds
2923 {
2924 struct target_ipc_perm shm_perm;
2925 abi_ulong shm_segsz;
2926 abi_ulong shm_atime;
2927 #if TARGET_ABI_BITS == 32
2928 abi_ulong __unused1;
2929 #endif
2930 abi_ulong shm_dtime;
2931 #if TARGET_ABI_BITS == 32
2932 abi_ulong __unused2;
2933 #endif
2934 abi_ulong shm_ctime;
2935 #if TARGET_ABI_BITS == 32
2936 abi_ulong __unused3;
2937 #endif
2938 int shm_cpid;
2939 int shm_lpid;
2940 abi_ulong shm_nattch;
2941 unsigned long int __unused4;
2942 unsigned long int __unused5;
2943 };
2944
2945 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2946 abi_ulong target_addr)
2947 {
2948 struct target_shmid_ds *target_sd;
2949
2950 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2951 return -TARGET_EFAULT;
2952 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2953 return -TARGET_EFAULT;
2954 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2955 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2956 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2957 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2958 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2959 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2960 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2961 unlock_user_struct(target_sd, target_addr, 0);
2962 return 0;
2963 }
2964
2965 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2966 struct shmid_ds *host_sd)
2967 {
2968 struct target_shmid_ds *target_sd;
2969
2970 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2971 return -TARGET_EFAULT;
2972 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2973 return -TARGET_EFAULT;
2974 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2975 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2976 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2977 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2978 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2979 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2980 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2981 unlock_user_struct(target_sd, target_addr, 1);
2982 return 0;
2983 }
2984
2985 struct target_shminfo {
2986 abi_ulong shmmax;
2987 abi_ulong shmmin;
2988 abi_ulong shmmni;
2989 abi_ulong shmseg;
2990 abi_ulong shmall;
2991 };
2992
2993 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2994 struct shminfo *host_shminfo)
2995 {
2996 struct target_shminfo *target_shminfo;
2997 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2998 return -TARGET_EFAULT;
2999 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3000 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3001 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3002 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3003 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3004 unlock_user_struct(target_shminfo, target_addr, 1);
3005 return 0;
3006 }
3007
3008 struct target_shm_info {
3009 int used_ids;
3010 abi_ulong shm_tot;
3011 abi_ulong shm_rss;
3012 abi_ulong shm_swp;
3013 abi_ulong swap_attempts;
3014 abi_ulong swap_successes;
3015 };
3016
3017 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3018 struct shm_info *host_shm_info)
3019 {
3020 struct target_shm_info *target_shm_info;
3021 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3022 return -TARGET_EFAULT;
3023 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3024 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3025 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3026 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3027 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3028 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3029 unlock_user_struct(target_shm_info, target_addr, 1);
3030 return 0;
3031 }
3032
3033 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3034 {
3035 struct shmid_ds dsarg;
3036 struct shminfo shminfo;
3037 struct shm_info shm_info;
3038 abi_long ret = -TARGET_EINVAL;
3039
3040 cmd &= 0xff;
3041
3042 switch(cmd) {
3043 case IPC_STAT:
3044 case IPC_SET:
3045 case SHM_STAT:
3046 if (target_to_host_shmid_ds(&dsarg, buf))
3047 return -TARGET_EFAULT;
3048 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3049 if (host_to_target_shmid_ds(buf, &dsarg))
3050 return -TARGET_EFAULT;
3051 break;
3052 case IPC_INFO:
3053 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3054 if (host_to_target_shminfo(buf, &shminfo))
3055 return -TARGET_EFAULT;
3056 break;
3057 case SHM_INFO:
3058 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3059 if (host_to_target_shm_info(buf, &shm_info))
3060 return -TARGET_EFAULT;
3061 break;
3062 case IPC_RMID:
3063 case SHM_LOCK:
3064 case SHM_UNLOCK:
3065 ret = get_errno(shmctl(shmid, cmd, NULL));
3066 break;
3067 }
3068
3069 return ret;
3070 }
3071
3072 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3073 {
3074 abi_long raddr;
3075 void *host_raddr;
3076 struct shmid_ds shm_info;
3077 int i,ret;
3078
3079 /* find out the length of the shared memory segment */
3080 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3081 if (is_error(ret)) {
3082 /* can't get length, bail out */
3083 return ret;
3084 }
3085
3086 mmap_lock();
3087
3088 if (shmaddr)
3089 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3090 else {
3091 abi_ulong mmap_start;
3092
3093 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3094
3095 if (mmap_start == -1) {
3096 errno = ENOMEM;
3097 host_raddr = (void *)-1;
3098 } else
3099 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3100 }
3101
3102 if (host_raddr == (void *)-1) {
3103 mmap_unlock();
3104 return get_errno((long)host_raddr);
3105 }
3106 raddr=h2g((unsigned long)host_raddr);
3107
3108 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3109 PAGE_VALID | PAGE_READ |
3110 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3111
3112 for (i = 0; i < N_SHM_REGIONS; i++) {
3113 if (shm_regions[i].start == 0) {
3114 shm_regions[i].start = raddr;
3115 shm_regions[i].size = shm_info.shm_segsz;
3116 break;
3117 }
3118 }
3119
3120 mmap_unlock();
3121 return raddr;
3122
3123 }
3124
3125 static inline abi_long do_shmdt(abi_ulong shmaddr)
3126 {
3127 int i;
3128
3129 for (i = 0; i < N_SHM_REGIONS; ++i) {
3130 if (shm_regions[i].start == shmaddr) {
3131 shm_regions[i].start = 0;
3132 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3133 break;
3134 }
3135 }
3136
3137 return get_errno(shmdt(g2h(shmaddr)));
3138 }
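/*
 * Editor's sketch (not part of the original source): a guest SysV shared
 * memory round trip.  do_shmat() has to choose a guest address (via
 * mmap_find_vma when the caller passes NULL) and record the mapping in
 * shm_regions[] so that do_shmdt() can later clear the page flags for the
 * right range.  example_* is hypothetical; not compiled.
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>

static int example_shm_roundtrip(void)
{
    int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
    char *p;

    if (id < 0) {
        return -1;
    }
    p = shmat(id, NULL, 0);
    if (p == (void *)-1) {
        return -1;
    }
    p[0] = 42;
    shmdt(p);
    return shmctl(id, IPC_RMID, NULL);
}
#endif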
3139
3140 #ifdef TARGET_NR_ipc
3141 /* ??? This only works with linear mappings. */
3142 /* do_ipc() must return target values and target errnos. */
3143 static abi_long do_ipc(unsigned int call, int first,
3144 int second, int third,
3145 abi_long ptr, abi_long fifth)
3146 {
3147 int version;
3148 abi_long ret = 0;
3149
3150 version = call >> 16;
3151 call &= 0xffff;
3152
3153 switch (call) {
3154 case IPCOP_semop:
3155 ret = do_semop(first, ptr, second);
3156 break;
3157
3158 case IPCOP_semget:
3159 ret = get_errno(semget(first, second, third));
3160 break;
3161
3162 case IPCOP_semctl:
3163 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3164 break;
3165
3166 case IPCOP_msgget:
3167 ret = get_errno(msgget(first, second));
3168 break;
3169
3170 case IPCOP_msgsnd:
3171 ret = do_msgsnd(first, ptr, second, third);
3172 break;
3173
3174 case IPCOP_msgctl:
3175 ret = do_msgctl(first, second, ptr);
3176 break;
3177
3178 case IPCOP_msgrcv:
3179 switch (version) {
3180 case 0:
3181 {
3182 struct target_ipc_kludge {
3183 abi_long msgp;
3184 abi_long msgtyp;
3185 } *tmp;
3186
3187 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3188 ret = -TARGET_EFAULT;
3189 break;
3190 }
3191
3192 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3193
3194 unlock_user_struct(tmp, ptr, 0);
3195 break;
3196 }
3197 default:
3198 ret = do_msgrcv(first, ptr, second, fifth, third);
3199 }
3200 break;
3201
3202 case IPCOP_shmat:
3203 switch (version) {
3204 default:
3205 {
3206 abi_ulong raddr;
3207 raddr = do_shmat(first, ptr, second);
3208 if (is_error(raddr))
3209 return get_errno(raddr);
3210 if (put_user_ual(raddr, third))
3211 return -TARGET_EFAULT;
3212 break;
3213 }
3214 case 1:
3215 ret = -TARGET_EINVAL;
3216 break;
3217 }
3218 break;
3219 case IPCOP_shmdt:
3220 ret = do_shmdt(ptr);
3221 break;
3222
3223 case IPCOP_shmget:
3224 /* IPC_* flag values are the same on all linux platforms */
3225 ret = get_errno(shmget(first, second, third));
3226 break;
3227
3228 /* IPC_* and SHM_* command values are the same on all linux platforms */
3229 case IPCOP_shmctl:
3230 ret = do_shmctl(first, second, third);
3231 break;
3232 default:
3233 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3234 ret = -TARGET_ENOSYS;
3235 break;
3236 }
3237 return ret;
3238 }
3239 #endif
3240
3241 /* kernel structure types definitions */
3242
3243 #define STRUCT(name, ...) STRUCT_ ## name,
3244 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3245 enum {
3246 #include "syscall_types.h"
3247 };
3248 #undef STRUCT
3249 #undef STRUCT_SPECIAL
3250
3251 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3252 #define STRUCT_SPECIAL(name)
3253 #include "syscall_types.h"
3254 #undef STRUCT
3255 #undef STRUCT_SPECIAL
3256
3257 typedef struct IOCTLEntry IOCTLEntry;
3258
3259 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3260 int fd, abi_long cmd, abi_long arg);
3261
3262 struct IOCTLEntry {
3263 unsigned int target_cmd;
3264 unsigned int host_cmd;
3265 const char *name;
3266 int access;
3267 do_ioctl_fn *do_ioctl;
3268 const argtype arg_type[5];
3269 };
3270
3271 #define IOC_R 0x0001
3272 #define IOC_W 0x0002
3273 #define IOC_RW (IOC_R | IOC_W)
3274
3275 #define MAX_STRUCT_SIZE 4096
3276
3277 #ifdef CONFIG_FIEMAP
3278 /* So fiemap access checks don't overflow on 32 bit systems.
3279 * This is very slightly smaller than the limit imposed by
3280 * the underlying kernel.
3281 */
3282 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3283 / sizeof(struct fiemap_extent))
3284
3285 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3286 int fd, abi_long cmd, abi_long arg)
3287 {
3288 /* The parameter for this ioctl is a struct fiemap followed
3289 * by an array of struct fiemap_extent whose size is set
3290 * in fiemap->fm_extent_count. The array is filled in by the
3291 * ioctl.
3292 */
3293 int target_size_in, target_size_out;
3294 struct fiemap *fm;
3295 const argtype *arg_type = ie->arg_type;
3296 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3297 void *argptr, *p;
3298 abi_long ret;
3299 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3300 uint32_t outbufsz;
3301 int free_fm = 0;
3302
3303 assert(arg_type[0] == TYPE_PTR);
3304 assert(ie->access == IOC_RW);
3305 arg_type++;
3306 target_size_in = thunk_type_size(arg_type, 0);
3307 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3308 if (!argptr) {
3309 return -TARGET_EFAULT;
3310 }
3311 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3312 unlock_user(argptr, arg, 0);
3313 fm = (struct fiemap *)buf_temp;
3314 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3315 return -TARGET_EINVAL;
3316 }
3317
3318 outbufsz = sizeof (*fm) +
3319 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3320
3321 if (outbufsz > MAX_STRUCT_SIZE) {
3322 /* We can't fit all the extents into the fixed size buffer.
3323 * Allocate one that is large enough and use it instead.
3324 */
3325 fm = malloc(outbufsz);
3326 if (!fm) {
3327 return -TARGET_ENOMEM;
3328 }
3329 memcpy(fm, buf_temp, sizeof(struct fiemap));
3330 free_fm = 1;
3331 }
3332 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3333 if (!is_error(ret)) {
3334 target_size_out = target_size_in;
3335 /* An extent_count of 0 means we were only counting the extents
3336 * so there are no structs to copy
3337 */
3338 if (fm->fm_extent_count != 0) {
3339 target_size_out += fm->fm_mapped_extents * extent_size;
3340 }
3341 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3342 if (!argptr) {
3343 ret = -TARGET_EFAULT;
3344 } else {
3345 /* Convert the struct fiemap */
3346 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3347 if (fm->fm_extent_count != 0) {
3348 p = argptr + target_size_in;
3349 /* ...and then all the struct fiemap_extents */
3350 for (i = 0; i < fm->fm_mapped_extents; i++) {
3351 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3352 THUNK_TARGET);
3353 p += extent_size;
3354 }
3355 }
3356 unlock_user(argptr, arg, target_size_out);
3357 }
3358 }
3359 if (free_fm) {
3360 free(fm);
3361 }
3362 return ret;
3363 }
3364 #endif
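/*
 * Editor's sketch (not part of the original source): guest-side
 * FS_IOC_FIEMAP.  The caller sizes the trailing fiemap_extent array through
 * fm_extent_count, and that variable-length tail is what the handler above
 * has to bounce through a larger temporary buffer when it does not fit in
 * buf_temp.  example_* is hypothetical; not compiled.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>
#include <stdlib.h>
#include <string.h>

static int example_fiemap(int fd)
{
    size_t sz = sizeof(struct fiemap) + 16 * sizeof(struct fiemap_extent);
    struct fiemap *fm = malloc(sz);
    int ret;

    if (!fm) {
        return -1;
    }
    memset(fm, 0, sz);
    fm->fm_length = ~0ULL;      /* map the whole file */
    fm->fm_extent_count = 16;   /* room for 16 extents after the header */
    ret = ioctl(fd, FS_IOC_FIEMAP, fm);
    free(fm);
    return ret;
}
#endif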
3365
3366 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3367 int fd, abi_long cmd, abi_long arg)
3368 {
3369 const argtype *arg_type = ie->arg_type;
3370 int target_size;
3371 void *argptr;
3372 int ret;
3373 struct ifconf *host_ifconf;
3374 uint32_t outbufsz;
3375 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3376 int target_ifreq_size;
3377 int nb_ifreq;
3378 int free_buf = 0;
3379 int i;
3380 int target_ifc_len;
3381 abi_long target_ifc_buf;
3382 int host_ifc_len;
3383 char *host_ifc_buf;
3384
3385 assert(arg_type[0] == TYPE_PTR);
3386 assert(ie->access == IOC_RW);
3387
3388 arg_type++;
3389 target_size = thunk_type_size(arg_type, 0);
3390
3391 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3392 if (!argptr)
3393 return -TARGET_EFAULT;
3394 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3395 unlock_user(argptr, arg, 0);
3396
3397 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3398 target_ifc_len = host_ifconf->ifc_len;
3399 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3400
3401 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3402 nb_ifreq = target_ifc_len / target_ifreq_size;
3403 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3404
3405 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3406 if (outbufsz > MAX_STRUCT_SIZE) {
3407 /* We can't fit all the ifreq entries into the fixed size buffer.
3408 * Allocate one that is large enough and use it instead.
3409 */
3410 host_ifconf = malloc(outbufsz);
3411 if (!host_ifconf) {
3412 return -TARGET_ENOMEM;
3413 }
3414 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3415 free_buf = 1;
3416 }
3417 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3418
3419 host_ifconf->ifc_len = host_ifc_len;
3420 host_ifconf->ifc_buf = host_ifc_buf;
3421
3422 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3423 if (!is_error(ret)) {
3424 /* convert host ifc_len to target ifc_len */
3425
3426 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3427 target_ifc_len = nb_ifreq * target_ifreq_size;
3428 host_ifconf->ifc_len = target_ifc_len;
3429
3430 /* restore target ifc_buf */
3431
3432 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3433
3434 /* copy struct ifconf to target user */
3435
3436 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3437 if (!argptr)
3438 return -TARGET_EFAULT;
3439 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3440 unlock_user(argptr, arg, target_size);
3441
3442 /* copy ifreq[] to target user */
3443
3444 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3445 for (i = 0; i < nb_ifreq ; i++) {
3446 thunk_convert(argptr + i * target_ifreq_size,
3447 host_ifc_buf + i * sizeof(struct ifreq),
3448 ifreq_arg_type, THUNK_TARGET);
3449 }
3450 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3451 }
3452
3453 if (free_buf) {
3454 free(host_ifconf);
3455 }
3456
3457 return ret;
3458 }
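/*
 * Editor's sketch (not part of the original source): guest-side SIOCGIFCONF.
 * ifc_len is value-result and ifc_buf points at a caller-supplied array of
 * struct ifreq; those are the two fields the handler above rewrites, because
 * the target and host struct ifreq sizes can differ.  example_* is
 * hypothetical; not compiled.
 */
#if 0
#include <sys/ioctl.h>
#include <net/if.h>
#include <string.h>

static int example_ifconf(int sock)
{
    struct ifreq reqs[8];
    struct ifconf ifc;

    memset(&ifc, 0, sizeof(ifc));
    ifc.ifc_len = sizeof(reqs);
    ifc.ifc_req = reqs;
    return ioctl(sock, SIOCGIFCONF, &ifc);
}
#endif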
3459
3460 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3461 abi_long cmd, abi_long arg)
3462 {
3463 void *argptr;
3464 struct dm_ioctl *host_dm;
3465 abi_long guest_data;
3466 uint32_t guest_data_size;
3467 int target_size;
3468 const argtype *arg_type = ie->arg_type;
3469 abi_long ret;
3470 void *big_buf = NULL;
3471 char *host_data;
3472
3473 arg_type++;
3474 target_size = thunk_type_size(arg_type, 0);
3475 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3476 if (!argptr) {
3477 ret = -TARGET_EFAULT;
3478 goto out;
3479 }
3480 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3481 unlock_user(argptr, arg, 0);
3482
3483 /* buf_temp is too small, so fetch things into a bigger buffer */
3484 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3485 memcpy(big_buf, buf_temp, target_size);
3486 buf_temp = big_buf;
3487 host_dm = big_buf;
3488
3489 guest_data = arg + host_dm->data_start;
3490 if ((guest_data - arg) < 0) {
3491 ret = -EINVAL;
3492 goto out;
3493 }
3494 guest_data_size = host_dm->data_size - host_dm->data_start;
3495 host_data = (char*)host_dm + host_dm->data_start;
3496
3497 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3498 switch (ie->host_cmd) {
3499 case DM_REMOVE_ALL:
3500 case DM_LIST_DEVICES:
3501 case DM_DEV_CREATE:
3502 case DM_DEV_REMOVE:
3503 case DM_DEV_SUSPEND:
3504 case DM_DEV_STATUS:
3505 case DM_DEV_WAIT:
3506 case DM_TABLE_STATUS:
3507 case DM_TABLE_CLEAR:
3508 case DM_TABLE_DEPS:
3509 case DM_LIST_VERSIONS:
3510 /* no input data */
3511 break;
3512 case DM_DEV_RENAME:
3513 case DM_DEV_SET_GEOMETRY:
3514 /* data contains only strings */
3515 memcpy(host_data, argptr, guest_data_size);
3516 break;
3517 case DM_TARGET_MSG:
3518 memcpy(host_data, argptr, guest_data_size);
3519 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3520 break;
3521 case DM_TABLE_LOAD:
3522 {
3523 void *gspec = argptr;
3524 void *cur_data = host_data;
3525 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3526 int spec_size = thunk_type_size(arg_type, 0);
3527 int i;
3528
3529 for (i = 0; i < host_dm->target_count; i++) {
3530 struct dm_target_spec *spec = cur_data;
3531 uint32_t next;
3532 int slen;
3533
3534 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3535 slen = strlen((char*)gspec + spec_size) + 1;
3536 next = spec->next;
3537 spec->next = sizeof(*spec) + slen;
3538 strcpy((char*)&spec[1], gspec + spec_size);
3539 gspec += next;
3540 cur_data += spec->next;
3541 }
3542 break;
3543 }
3544 default:
3545 ret = -TARGET_EINVAL;
3546 goto out;
3547 }
3548 unlock_user(argptr, guest_data, 0);
3549
3550 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3551 if (!is_error(ret)) {
3552 guest_data = arg + host_dm->data_start;
3553 guest_data_size = host_dm->data_size - host_dm->data_start;
3554 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3555 switch (ie->host_cmd) {
3556 case DM_REMOVE_ALL:
3557 case DM_DEV_CREATE:
3558 case DM_DEV_REMOVE:
3559 case DM_DEV_RENAME:
3560 case DM_DEV_SUSPEND:
3561 case DM_DEV_STATUS:
3562 case DM_TABLE_LOAD:
3563 case DM_TABLE_CLEAR:
3564 case DM_TARGET_MSG:
3565 case DM_DEV_SET_GEOMETRY:
3566 /* no return data */
3567 break;
3568 case DM_LIST_DEVICES:
3569 {
3570 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3571 uint32_t remaining_data = guest_data_size;
3572 void *cur_data = argptr;
3573 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3574 int nl_size = 12; /* can't use thunk_size due to alignment */
3575
3576 while (1) {
3577 uint32_t next = nl->next;
3578 if (next) {
3579 nl->next = nl_size + (strlen(nl->name) + 1);
3580 }
3581 if (remaining_data < nl->next) {
3582 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3583 break;
3584 }
3585 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3586 strcpy(cur_data + nl_size, nl->name);
3587 cur_data += nl->next;
3588 remaining_data -= nl->next;
3589 if (!next) {
3590 break;
3591 }
3592 nl = (void*)nl + next;
3593 }
3594 break;
3595 }
3596 case DM_DEV_WAIT:
3597 case DM_TABLE_STATUS:
3598 {
3599 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3600 void *cur_data = argptr;
3601 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3602 int spec_size = thunk_type_size(arg_type, 0);
3603 int i;
3604
3605 for (i = 0; i < host_dm->target_count; i++) {
3606 uint32_t next = spec->next;
3607 int slen = strlen((char*)&spec[1]) + 1;
3608 spec->next = (cur_data - argptr) + spec_size + slen;
3609 if (guest_data_size < spec->next) {
3610 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3611 break;
3612 }
3613 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3614 strcpy(cur_data + spec_size, (char*)&spec[1]);
3615 cur_data = argptr + spec->next;
3616 spec = (void*)host_dm + host_dm->data_start + next;
3617 }
3618 break;
3619 }
3620 case DM_TABLE_DEPS:
3621 {
3622 void *hdata = (void*)host_dm + host_dm->data_start;
3623 int count = *(uint32_t*)hdata;
3624 uint64_t *hdev = hdata + 8;
3625 uint64_t *gdev = argptr + 8;
3626 int i;
3627
3628 *(uint32_t*)argptr = tswap32(count);
3629 for (i = 0; i < count; i++) {
3630 *gdev = tswap64(*hdev);
3631 gdev++;
3632 hdev++;
3633 }
3634 break;
3635 }
3636 case DM_LIST_VERSIONS:
3637 {
3638 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3639 uint32_t remaining_data = guest_data_size;
3640 void *cur_data = argptr;
3641 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3642 int vers_size = thunk_type_size(arg_type, 0);
3643
3644 while (1) {
3645 uint32_t next = vers->next;
3646 if (next) {
3647 vers->next = vers_size + (strlen(vers->name) + 1);
3648 }
3649 if (remaining_data < vers->next) {
3650 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3651 break;
3652 }
3653 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3654 strcpy(cur_data + vers_size, vers->name);
3655 cur_data += vers->next;
3656 remaining_data -= vers->next;
3657 if (!next) {
3658 break;
3659 }
3660 vers = (void*)vers + next;
3661 }
3662 break;
3663 }
3664 default:
3665 ret = -TARGET_EINVAL;
3666 goto out;
3667 }
3668 unlock_user(argptr, guest_data, guest_data_size);
3669
3670 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3671 if (!argptr) {
3672 ret = -TARGET_EFAULT;
3673 goto out;
3674 }
3675 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3676 unlock_user(argptr, arg, target_size);
3677 }
3678 out:
3679 g_free(big_buf);
3680 return ret;
3681 }
3682
3683 static IOCTLEntry ioctl_entries[] = {
3684 #define IOCTL(cmd, access, ...) \
3685 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3686 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3687 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3688 #include "ioctls.h"
3689 { 0, 0, },
3690 };
3691
3692 /* ??? Implement proper locking for ioctls. */
3693 /* do_ioctl() Must return target values and target errnos. */
3694 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3695 {
3696 const IOCTLEntry *ie;
3697 const argtype *arg_type;
3698 abi_long ret;
3699 uint8_t buf_temp[MAX_STRUCT_SIZE];
3700 int target_size;
3701 void *argptr;
3702
3703 ie = ioctl_entries;
3704 for(;;) {
3705 if (ie->target_cmd == 0) {
3706 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3707 return -TARGET_ENOSYS;
3708 }
3709 if (ie->target_cmd == cmd)
3710 break;
3711 ie++;
3712 }
3713 arg_type = ie->arg_type;
3714 #if defined(DEBUG)
3715 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3716 #endif
3717 if (ie->do_ioctl) {
3718 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3719 }
3720
3721 switch(arg_type[0]) {
3722 case TYPE_NULL:
3723 /* no argument */
3724 ret = get_errno(ioctl(fd, ie->host_cmd));
3725 break;
3726 case TYPE_PTRVOID:
3727 case TYPE_INT:
3728 /* int argument */
3729 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3730 break;
3731 case TYPE_PTR:
3732 arg_type++;
3733 target_size = thunk_type_size(arg_type, 0);
3734 switch(ie->access) {
3735 case IOC_R:
3736 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3737 if (!is_error(ret)) {
3738 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3739 if (!argptr)
3740 return -TARGET_EFAULT;
3741 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3742 unlock_user(argptr, arg, target_size);
3743 }
3744 break;
3745 case IOC_W:
3746 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3747 if (!argptr)
3748 return -TARGET_EFAULT;
3749 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3750 unlock_user(argptr, arg, 0);
3751 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3752 break;
3753 default:
3754 case IOC_RW:
3755 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3756 if (!argptr)
3757 return -TARGET_EFAULT;
3758 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3759 unlock_user(argptr, arg, 0);
3760 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3761 if (!is_error(ret)) {
3762 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3763 if (!argptr)
3764 return -TARGET_EFAULT;
3765 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3766 unlock_user(argptr, arg, target_size);
3767 }
3768 break;
3769 }
3770 break;
3771 default:
3772 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3773 (long)cmd, arg_type[0]);
3774 ret = -TARGET_ENOSYS;
3775 break;
3776 }
3777 return ret;
3778 }
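/*
 * Editor's sketch (not part of the original source): guest-side view of a
 * plain "pointer to struct, read-only for the caller" ioctl.  For an entry
 * declared with IOC_R and a TYPE_PTR argument, the generic path above issues
 * the host ioctl into buf_temp and then converts the result back into the
 * guest's layout with thunk_convert(..., THUNK_TARGET).  example_* is
 * hypothetical; not compiled.
 */
#if 0
#include <sys/ioctl.h>
#include <unistd.h>

static int example_get_winsize(void)
{
    struct winsize ws;

    return ioctl(STDIN_FILENO, TIOCGWINSZ, &ws);
}
#endif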
3779
3780 static const bitmask_transtbl iflag_tbl[] = {
3781 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3782 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3783 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3784 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3785 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3786 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3787 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3788 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3789 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3790 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3791 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3792 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3793 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3794 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3795 { 0, 0, 0, 0 }
3796 };
3797
3798 static const bitmask_transtbl oflag_tbl[] = {
3799 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3800 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3801 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3802 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3803 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3804 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3805 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3806 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3807 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3808 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3809 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3810 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3811 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3812 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3813 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3814 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3815 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3816 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3817 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3818 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3819 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3820 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3821 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3822 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3823 { 0, 0, 0, 0 }
3824 };
3825
3826 static const bitmask_transtbl cflag_tbl[] = {
3827 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3828 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3829 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3830 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3831 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3832 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3833 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3834 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3835 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3836 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3837 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3838 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3839 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3840 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3841 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3842 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3843 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3844 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3845 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3846 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3847 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3848 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3849 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3850 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3851 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3852 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3853 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3854 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3855 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3856 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3857 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3858 { 0, 0, 0, 0 }
3859 };
3860
3861 static const bitmask_transtbl lflag_tbl[] = {
3862 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3863 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3864 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3865 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3866 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3867 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3868 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3869 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3870 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3871 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3872 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3873 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3874 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3875 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3876 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3877 { 0, 0, 0, 0 }
3878 };
3879
3880 static void target_to_host_termios (void *dst, const void *src)
3881 {
3882 struct host_termios *host = dst;
3883 const struct target_termios *target = src;
3884
3885 host->c_iflag =
3886 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3887 host->c_oflag =
3888 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3889 host->c_cflag =
3890 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3891 host->c_lflag =
3892 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3893 host->c_line = target->c_line;
3894
3895 memset(host->c_cc, 0, sizeof(host->c_cc));
3896 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3897 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3898 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3899 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3900 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3901 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3902 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3903 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3904 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3905 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3906 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3907 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3908 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3909 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3910 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3911 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3912 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3913 }
3914
3915 static void host_to_target_termios (void *dst, const void *src)
3916 {
3917 struct target_termios *target = dst;
3918 const struct host_termios *host = src;
3919
3920 target->c_iflag =
3921 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3922 target->c_oflag =
3923 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3924 target->c_cflag =
3925 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3926 target->c_lflag =
3927 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3928 target->c_line = host->c_line;
3929
3930 memset(target->c_cc, 0, sizeof(target->c_cc));
3931 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3932 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3933 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3934 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3935 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3936 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3937 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3938 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3939 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3940 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3941 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3942 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3943 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3944 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3945 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3946 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3947 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3948 }
3949
3950 static const StructEntry struct_termios_def = {
3951 .convert = { host_to_target_termios, target_to_host_termios },
3952 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3953 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3954 };
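/*
 * struct_termios_def is registered with the thunk layer by syscall_init()
 * below (presumably through a STRUCT_SPECIAL(termios) entry in
 * syscall_types.h), so the generic ioctl conversion path above (see the
 * IOC_RW handling) converts a termios argument by calling these handlers
 * rather than doing a field-by-field copy.  Going by the usual thunk
 * convention, the first .convert entry is used for host -> target
 * conversion (e.g. the result of TCGETS) and the second for
 * target -> host (e.g. the argument of TCSETS).
 */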
3955
3956 static bitmask_transtbl mmap_flags_tbl[] = {
3957 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3958 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3959 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3960 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3961 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3962 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3963 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3964 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3965 { 0, 0, 0, 0 }
3966 };
3967
3968 #if defined(TARGET_I386)
3969
3970 /* NOTE: there is really only one LDT shared by all threads */
3971 static uint8_t *ldt_table;
3972
3973 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3974 {
3975 int size;
3976 void *p;
3977
3978 if (!ldt_table)
3979 return 0;
3980 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3981 if (size > bytecount)
3982 size = bytecount;
3983 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3984 if (!p)
3985 return -TARGET_EFAULT;
3986 /* ??? Should this be byteswapped? */
3987 memcpy(p, ldt_table, size);
3988 unlock_user(p, ptr, size);
3989 return size;
3990 }
3991
3992 /* XXX: add locking support */
3993 static abi_long write_ldt(CPUX86State *env,
3994 abi_ulong ptr, unsigned long bytecount, int oldmode)
3995 {
3996 struct target_modify_ldt_ldt_s ldt_info;
3997 struct target_modify_ldt_ldt_s *target_ldt_info;
3998 int seg_32bit, contents, read_exec_only, limit_in_pages;
3999 int seg_not_present, useable, lm;
4000 uint32_t *lp, entry_1, entry_2;
4001
4002 if (bytecount != sizeof(ldt_info))
4003 return -TARGET_EINVAL;
4004 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4005 return -TARGET_EFAULT;
4006 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4007 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4008 ldt_info.limit = tswap32(target_ldt_info->limit);
4009 ldt_info.flags = tswap32(target_ldt_info->flags);
4010 unlock_user_struct(target_ldt_info, ptr, 0);
4011
4012 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4013 return -TARGET_EINVAL;
4014 seg_32bit = ldt_info.flags & 1;
4015 contents = (ldt_info.flags >> 1) & 3;
4016 read_exec_only = (ldt_info.flags >> 3) & 1;
4017 limit_in_pages = (ldt_info.flags >> 4) & 1;
4018 seg_not_present = (ldt_info.flags >> 5) & 1;
4019 useable = (ldt_info.flags >> 6) & 1;
4020 #ifdef TARGET_ABI32
4021 lm = 0;
4022 #else
4023 lm = (ldt_info.flags >> 7) & 1;
4024 #endif
4025 if (contents == 3) {
4026 if (oldmode)
4027 return -TARGET_EINVAL;
4028 if (seg_not_present == 0)
4029 return -TARGET_EINVAL;
4030 }
4031 /* allocate the LDT */
4032 if (!ldt_table) {
4033 env->ldt.base = target_mmap(0,
4034 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4035 PROT_READ|PROT_WRITE,
4036 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4037 if (env->ldt.base == -1)
4038 return -TARGET_ENOMEM;
4039 memset(g2h(env->ldt.base), 0,
4040 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4041 env->ldt.limit = 0xffff;
4042 ldt_table = g2h(env->ldt.base);
4043 }
4044
4045 /* NOTE: same code as Linux kernel */
4046 /* Allow LDTs to be cleared by the user. */
4047 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4048 if (oldmode ||
4049 (contents == 0 &&
4050 read_exec_only == 1 &&
4051 seg_32bit == 0 &&
4052 limit_in_pages == 0 &&
4053 seg_not_present == 1 &&
4054 useable == 0 )) {
4055 entry_1 = 0;
4056 entry_2 = 0;
4057 goto install;
4058 }
4059 }
4060
4061 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4062 (ldt_info.limit & 0x0ffff);
4063 entry_2 = (ldt_info.base_addr & 0xff000000) |
4064 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4065 (ldt_info.limit & 0xf0000) |
4066 ((read_exec_only ^ 1) << 9) |
4067 (contents << 10) |
4068 ((seg_not_present ^ 1) << 15) |
4069 (seg_32bit << 22) |
4070 (limit_in_pages << 23) |
4071 (lm << 21) |
4072 0x7000;
4073 if (!oldmode)
4074 entry_2 |= (useable << 20);
4075
4076 /* Install the new entry ... */
4077 install:
4078 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4079 lp[0] = tswap32(entry_1);
4080 lp[1] = tswap32(entry_2);
4081 return 0;
4082 }
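/*
 * For reference, entry_1/entry_2 above use the standard x86 segment
 * descriptor layout (the same packing the kernel uses for modify_ldt and
 * set_thread_area); the same bits are decoded again in
 * do_get_thread_area() further down:
 *
 *   entry_1:  bits 31-16  base[15:0]
 *             bits 15-0   limit[15:0]
 *
 *   entry_2:  bits 31-24  base[31:24]
 *             bit  23     G   granularity      (limit_in_pages)
 *             bit  22     D/B default size     (seg_32bit)
 *             bit  21     L   64-bit code      (lm)
 *             bit  20     AVL                  (useable)
 *             bits 19-16  limit[19:16]
 *             bit  15     P   present          (!seg_not_present)
 *             bits 14-13  DPL, always 3 here   (part of the 0x7000 constant)
 *             bit  12     S   code/data        (the other bit of 0x7000)
 *             bits 11-9   type: contents in bits 11-10, !read_exec_only
 *                         in bit 9; the accessed bit (bit 8) is left clear
 *             bits 7-0    base[23:16]
 */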
4083
4084 /* specific and weird i386 syscalls */
4085 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4086 unsigned long bytecount)
4087 {
4088 abi_long ret;
4089
4090 switch (func) {
4091 case 0:
4092 ret = read_ldt(ptr, bytecount);
4093 break;
4094 case 1:
4095 ret = write_ldt(env, ptr, bytecount, 1);
4096 break;
4097 case 0x11:
4098 ret = write_ldt(env, ptr, bytecount, 0);
4099 break;
4100 default:
4101 ret = -TARGET_ENOSYS;
4102 break;
4103 }
4104 return ret;
4105 }
4106
4107 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4108 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4109 {
4110 uint64_t *gdt_table = g2h(env->gdt.base);
4111 struct target_modify_ldt_ldt_s ldt_info;
4112 struct target_modify_ldt_ldt_s *target_ldt_info;
4113 int seg_32bit, contents, read_exec_only, limit_in_pages;
4114 int seg_not_present, useable, lm;
4115 uint32_t *lp, entry_1, entry_2;
4116 int i;
4117
4118 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4119 if (!target_ldt_info)
4120 return -TARGET_EFAULT;
4121 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4122 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4123 ldt_info.limit = tswap32(target_ldt_info->limit);
4124 ldt_info.flags = tswap32(target_ldt_info->flags);
4125 if (ldt_info.entry_number == -1) {
4126 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4127 if (gdt_table[i] == 0) {
4128 ldt_info.entry_number = i;
4129 target_ldt_info->entry_number = tswap32(i);
4130 break;
4131 }
4132 }
4133 }
4134 unlock_user_struct(target_ldt_info, ptr, 1);
4135
4136 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4137 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4138 return -TARGET_EINVAL;
4139 seg_32bit = ldt_info.flags & 1;
4140 contents = (ldt_info.flags >> 1) & 3;
4141 read_exec_only = (ldt_info.flags >> 3) & 1;
4142 limit_in_pages = (ldt_info.flags >> 4) & 1;
4143 seg_not_present = (ldt_info.flags >> 5) & 1;
4144 useable = (ldt_info.flags >> 6) & 1;
4145 #ifdef TARGET_ABI32
4146 lm = 0;
4147 #else
4148 lm = (ldt_info.flags >> 7) & 1;
4149 #endif
4150
4151 if (contents == 3) {
4152 if (seg_not_present == 0)
4153 return -TARGET_EINVAL;
4154 }
4155
4156 /* NOTE: same code as Linux kernel */
4157 /* Allow LDTs to be cleared by the user. */
4158 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4159 if ((contents == 0 &&
4160 read_exec_only == 1 &&
4161 seg_32bit == 0 &&
4162 limit_in_pages == 0 &&
4163 seg_not_present == 1 &&
4164 useable == 0 )) {
4165 entry_1 = 0;
4166 entry_2 = 0;
4167 goto install;
4168 }
4169 }
4170
4171 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4172 (ldt_info.limit & 0x0ffff);
4173 entry_2 = (ldt_info.base_addr & 0xff000000) |
4174 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4175 (ldt_info.limit & 0xf0000) |
4176 ((read_exec_only ^ 1) << 9) |
4177 (contents << 10) |
4178 ((seg_not_present ^ 1) << 15) |
4179 (seg_32bit << 22) |
4180 (limit_in_pages << 23) |
4181 (useable << 20) |
4182 (lm << 21) |
4183 0x7000;
4184
4185 /* Install the new entry ... */
4186 install:
4187 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4188 lp[0] = tswap32(entry_1);
4189 lp[1] = tswap32(entry_2);
4190 return 0;
4191 }
4192
4193 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4194 {
4195 struct target_modify_ldt_ldt_s *target_ldt_info;
4196 uint64_t *gdt_table = g2h(env->gdt.base);
4197 uint32_t base_addr, limit, flags;
4198 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4199 int seg_not_present, useable, lm;
4200 uint32_t *lp, entry_1, entry_2;
4201
4202 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4203 if (!target_ldt_info)
4204 return -TARGET_EFAULT;
4205 idx = tswap32(target_ldt_info->entry_number);
4206 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4207 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4208 unlock_user_struct(target_ldt_info, ptr, 1);
4209 return -TARGET_EINVAL;
4210 }
4211 lp = (uint32_t *)(gdt_table + idx);
4212 entry_1 = tswap32(lp[0]);
4213 entry_2 = tswap32(lp[1]);
4214
4215 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4216 contents = (entry_2 >> 10) & 3;
4217 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4218 seg_32bit = (entry_2 >> 22) & 1;
4219 limit_in_pages = (entry_2 >> 23) & 1;
4220 useable = (entry_2 >> 20) & 1;
4221 #ifdef TARGET_ABI32
4222 lm = 0;
4223 #else
4224 lm = (entry_2 >> 21) & 1;
4225 #endif
4226 flags = (seg_32bit << 0) | (contents << 1) |
4227 (read_exec_only << 3) | (limit_in_pages << 4) |
4228 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4229 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4230 base_addr = (entry_1 >> 16) |
4231 (entry_2 & 0xff000000) |
4232 ((entry_2 & 0xff) << 16);
4233 target_ldt_info->base_addr = tswapal(base_addr);
4234 target_ldt_info->limit = tswap32(limit);
4235 target_ldt_info->flags = tswap32(flags);
4236 unlock_user_struct(target_ldt_info, ptr, 1);
4237 return 0;
4238 }
4239 #endif /* TARGET_I386 && TARGET_ABI32 */
4240
4241 #ifndef TARGET_ABI32
4242 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4243 {
4244 abi_long ret = 0;
4245 abi_ulong val;
4246 int idx;
4247
4248 switch(code) {
4249 case TARGET_ARCH_SET_GS:
4250 case TARGET_ARCH_SET_FS:
4251 if (code == TARGET_ARCH_SET_GS)
4252 idx = R_GS;
4253 else
4254 idx = R_FS;
4255 cpu_x86_load_seg(env, idx, 0);
4256 env->segs[idx].base = addr;
4257 break;
4258 case TARGET_ARCH_GET_GS:
4259 case TARGET_ARCH_GET_FS:
4260 if (code == TARGET_ARCH_GET_GS)
4261 idx = R_GS;
4262 else
4263 idx = R_FS;
4264 val = env->segs[idx].base;
4265 if (put_user(val, addr, abi_ulong))
4266 ret = -TARGET_EFAULT;
4267 break;
4268 default:
4269 ret = -TARGET_EINVAL;
4270 break;
4271 }
4272 return ret;
4273 }
4274 #endif
4275
4276 #endif /* defined(TARGET_I386) */
4277
4278 #define NEW_STACK_SIZE 0x40000
4279
4280 #if defined(CONFIG_USE_NPTL)
4281
4282 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4283 typedef struct {
4284 CPUArchState *env;
4285 pthread_mutex_t mutex;
4286 pthread_cond_t cond;
4287 pthread_t thread;
4288 uint32_t tid;
4289 abi_ulong child_tidptr;
4290 abi_ulong parent_tidptr;
4291 sigset_t sigmask;
4292 } new_thread_info;
4293
4294 static void *clone_func(void *arg)
4295 {
4296 new_thread_info *info = arg;
4297 CPUArchState *env;
4298 TaskState *ts;
4299
4300 env = info->env;
4301 thread_env = env;
4302 ts = (TaskState *)thread_env->opaque;
4303 info->tid = gettid();
4304 env->host_tid = info->tid;
4305 task_settid(ts);
4306 if (info->child_tidptr)
4307 put_user_u32(info->tid, info->child_tidptr);
4308 if (info->parent_tidptr)
4309 put_user_u32(info->tid, info->parent_tidptr);
4310 /* Enable signals. */
4311 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4312 /* Signal to the parent that we're ready. */
4313 pthread_mutex_lock(&info->mutex);
4314 pthread_cond_broadcast(&info->cond);
4315 pthread_mutex_unlock(&info->mutex);
4316 /* Wait until the parent has finished initializing the TLS state. */
4317 pthread_mutex_lock(&clone_lock);
4318 pthread_mutex_unlock(&clone_lock);
4319 cpu_loop(env);
4320 /* never exits */
4321 return NULL;
4322 }
4323 #else
4324
4325 static int clone_func(void *arg)
4326 {
4327 CPUArchState *env = arg;
4328 cpu_loop(env);
4329 /* never exits */
4330 return 0;
4331 }
4332 #endif
4333
4334 /* do_fork() must return host values and target errnos (unlike most
4335 do_*() functions). */
4336 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4337 abi_ulong parent_tidptr, target_ulong newtls,
4338 abi_ulong child_tidptr)
4339 {
4340 int ret;
4341 TaskState *ts;
4342 CPUArchState *new_env;
4343 #if defined(CONFIG_USE_NPTL)
4344 unsigned int nptl_flags;
4345 sigset_t sigmask;
4346 #else
4347 uint8_t *new_stack;
4348 #endif
4349
4350 /* Emulate vfork() with fork() */
4351 if (flags & CLONE_VFORK)
4352 flags &= ~(CLONE_VFORK | CLONE_VM);
4353
4354 if (flags & CLONE_VM) {
4355 TaskState *parent_ts = (TaskState *)env->opaque;
4356 #if defined(CONFIG_USE_NPTL)
4357 new_thread_info info;
4358 pthread_attr_t attr;
4359 #endif
4360 ts = g_malloc0(sizeof(TaskState));
4361 init_task_state(ts);
4362 /* we create a new CPU instance. */
4363 new_env = cpu_copy(env);
4364 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4365 cpu_reset(ENV_GET_CPU(new_env));
4366 #endif
4367 /* Init regs that differ from the parent. */
4368 cpu_clone_regs(new_env, newsp);
4369 new_env->opaque = ts;
4370 ts->bprm = parent_ts->bprm;
4371 ts->info = parent_ts->info;
4372 #if defined(CONFIG_USE_NPTL)
4373 nptl_flags = flags;
4374 flags &= ~CLONE_NPTL_FLAGS2;
4375
4376 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4377 ts->child_tidptr = child_tidptr;
4378 }
4379
4380 if (nptl_flags & CLONE_SETTLS)
4381 cpu_set_tls (new_env, newtls);
4382
4383 /* Grab a mutex so that thread setup appears atomic. */
4384 pthread_mutex_lock(&clone_lock);
4385
4386 memset(&info, 0, sizeof(info));
4387 pthread_mutex_init(&info.mutex, NULL);
4388 pthread_mutex_lock(&info.mutex);
4389 pthread_cond_init(&info.cond, NULL);
4390 info.env = new_env;
4391 if (nptl_flags & CLONE_CHILD_SETTID)
4392 info.child_tidptr = child_tidptr;
4393 if (nptl_flags & CLONE_PARENT_SETTID)
4394 info.parent_tidptr = parent_tidptr;
4395
4396 ret = pthread_attr_init(&attr);
4397 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4398 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4399 /* It is not safe to deliver signals until the child has finished
4400 initializing, so temporarily block all signals. */
4401 sigfillset(&sigmask);
4402 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4403
4404 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4405 /* TODO: Free new CPU state if thread creation failed. */
4406
4407 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4408 pthread_attr_destroy(&attr);
4409 if (ret == 0) {
4410 /* Wait for the child to initialize. */
4411 pthread_cond_wait(&info.cond, &info.mutex);
4412 ret = info.tid;
4413 if (flags & CLONE_PARENT_SETTID)
4414 put_user_u32(ret, parent_tidptr);
4415 } else {
4416 ret = -1;
4417 }
4418 pthread_mutex_unlock(&info.mutex);
4419 pthread_cond_destroy(&info.cond);
4420 pthread_mutex_destroy(&info.mutex);
4421 pthread_mutex_unlock(&clone_lock);
4422 #else
4423 if (flags & CLONE_NPTL_FLAGS2)
4424 return -EINVAL;
4425 /* This is probably going to die very quickly, but do it anyway. */
4426 new_stack = g_malloc0 (NEW_STACK_SIZE);
4427 #ifdef __ia64__
4428 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4429 #else
4430 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4431 #endif
4432 #endif
4433 } else {
4434 /* if CLONE_VM is not set, we treat this as a fork */
4435 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4436 return -EINVAL;
4437 fork_start();
4438 ret = fork();
4439 if (ret == 0) {
4440 /* Child Process. */
4441 cpu_clone_regs(env, newsp);
4442 fork_end(1);
4443 #if defined(CONFIG_USE_NPTL)
4444 /* There is a race condition here. The parent process could
4445 theoretically read the TID in the child process before the child
4446 tid is set. This would require using either ptrace
4447 (not implemented) or having *_tidptr point at a shared memory
4448 mapping. We can't repeat the spinlock hack used above because
4449 the child process gets its own copy of the lock. */
4450 if (flags & CLONE_CHILD_SETTID)
4451 put_user_u32(gettid(), child_tidptr);
4452 if (flags & CLONE_PARENT_SETTID)
4453 put_user_u32(gettid(), parent_tidptr);
4454 ts = (TaskState *)env->opaque;
4455 if (flags & CLONE_SETTLS)
4456 cpu_set_tls (env, newtls);
4457 if (flags & CLONE_CHILD_CLEARTID)
4458 ts->child_tidptr = child_tidptr;
4459 #endif
4460 } else {
4461 fork_end(0);
4462 }
4463 }
4464 return ret;
4465 }
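/*
 * Rough sequence of the CLONE_VM/NPTL path above (a summary of the code,
 * not extra behaviour): the parent takes clone_lock, fills in a
 * new_thread_info, blocks all signals and pthread_create()s clone_func().
 * The child publishes its tid (and writes the *_tidptr words), restores
 * the signal mask and broadcasts info.cond; the parent, waiting on that
 * condition variable, picks the tid up as the return value.  The child
 * then briefly blocks on clone_lock so it cannot enter cpu_loop() before
 * the parent has finished its bookkeeping and dropped the lock.
 */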
4466
4467 /* warning: doesn't handle Linux-specific flags... */
4468 static int target_to_host_fcntl_cmd(int cmd)
4469 {
4470 switch(cmd) {
4471 case TARGET_F_DUPFD:
4472 case TARGET_F_GETFD:
4473 case TARGET_F_SETFD:
4474 case TARGET_F_GETFL:
4475 case TARGET_F_SETFL:
4476 return cmd;
4477 case TARGET_F_GETLK:
4478 return F_GETLK;
4479 case TARGET_F_SETLK:
4480 return F_SETLK;
4481 case TARGET_F_SETLKW:
4482 return F_SETLKW;
4483 case TARGET_F_GETOWN:
4484 return F_GETOWN;
4485 case TARGET_F_SETOWN:
4486 return F_SETOWN;
4487 case TARGET_F_GETSIG:
4488 return F_GETSIG;
4489 case TARGET_F_SETSIG:
4490 return F_SETSIG;
4491 #if TARGET_ABI_BITS == 32
4492 case TARGET_F_GETLK64:
4493 return F_GETLK64;
4494 case TARGET_F_SETLK64:
4495 return F_SETLK64;
4496 case TARGET_F_SETLKW64:
4497 return F_SETLKW64;
4498 #endif
4499 case TARGET_F_SETLEASE:
4500 return F_SETLEASE;
4501 case TARGET_F_GETLEASE:
4502 return F_GETLEASE;
4503 #ifdef F_DUPFD_CLOEXEC
4504 case TARGET_F_DUPFD_CLOEXEC:
4505 return F_DUPFD_CLOEXEC;
4506 #endif
4507 case TARGET_F_NOTIFY:
4508 return F_NOTIFY;
4509 default:
4510 return -TARGET_EINVAL;
4511 }
4512 return -TARGET_EINVAL;
4513 }
4514
4515 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4516 {
4517 struct flock fl;
4518 struct target_flock *target_fl;
4519 struct flock64 fl64;
4520 struct target_flock64 *target_fl64;
4521 abi_long ret;
4522 int host_cmd = target_to_host_fcntl_cmd(cmd);
4523
4524 if (host_cmd == -TARGET_EINVAL)
4525 return host_cmd;
4526
4527 switch(cmd) {
4528 case TARGET_F_GETLK:
4529 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4530 return -TARGET_EFAULT;
4531 fl.l_type = tswap16(target_fl->l_type);
4532 fl.l_whence = tswap16(target_fl->l_whence);
4533 fl.l_start = tswapal(target_fl->l_start);
4534 fl.l_len = tswapal(target_fl->l_len);
4535 fl.l_pid = tswap32(target_fl->l_pid);
4536 unlock_user_struct(target_fl, arg, 0);
4537 ret = get_errno(fcntl(fd, host_cmd, &fl));
4538 if (ret == 0) {
4539 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4540 return -TARGET_EFAULT;
4541 target_fl->l_type = tswap16(fl.l_type);
4542 target_fl->l_whence = tswap16(fl.l_whence);
4543 target_fl->l_start = tswapal(fl.l_start);
4544 target_fl->l_len = tswapal(fl.l_len);
4545 target_fl->l_pid = tswap32(fl.l_pid);
4546 unlock_user_struct(target_fl, arg, 1);
4547 }
4548 break;
4549
4550 case TARGET_F_SETLK:
4551 case TARGET_F_SETLKW:
4552 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4553 return -TARGET_EFAULT;
4554 fl.l_type = tswap16(target_fl->l_type);
4555 fl.l_whence = tswap16(target_fl->l_whence);
4556 fl.l_start = tswapal(target_fl->l_start);
4557 fl.l_len = tswapal(target_fl->l_len);
4558 fl.l_pid = tswap32(target_fl->l_pid);
4559 unlock_user_struct(target_fl, arg, 0);
4560 ret = get_errno(fcntl(fd, host_cmd, &fl));
4561 break;
4562
4563 case TARGET_F_GETLK64:
4564 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4565 return -TARGET_EFAULT;
4566 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4567 fl64.l_whence = tswap16(target_fl64->l_whence);
4568 fl64.l_start = tswap64(target_fl64->l_start);
4569 fl64.l_len = tswap64(target_fl64->l_len);
4570 fl64.l_pid = tswap32(target_fl64->l_pid);
4571 unlock_user_struct(target_fl64, arg, 0);
4572 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4573 if (ret == 0) {
4574 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4575 return -TARGET_EFAULT;
4576 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4577 target_fl64->l_whence = tswap16(fl64.l_whence);
4578 target_fl64->l_start = tswap64(fl64.l_start);
4579 target_fl64->l_len = tswap64(fl64.l_len);
4580 target_fl64->l_pid = tswap32(fl64.l_pid);
4581 unlock_user_struct(target_fl64, arg, 1);
4582 }
4583 break;
4584 case TARGET_F_SETLK64:
4585 case TARGET_F_SETLKW64:
4586 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4587 return -TARGET_EFAULT;
4588 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4589 fl64.l_whence = tswap16(target_fl64->l_whence);
4590 fl64.l_start = tswap64(target_fl64->l_start);
4591 fl64.l_len = tswap64(target_fl64->l_len);
4592 fl64.l_pid = tswap32(target_fl64->l_pid);
4593 unlock_user_struct(target_fl64, arg, 0);
4594 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4595 break;
4596
4597 case TARGET_F_GETFL:
4598 ret = get_errno(fcntl(fd, host_cmd, arg));
4599 if (ret >= 0) {
4600 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4601 }
4602 break;
4603
4604 case TARGET_F_SETFL:
4605 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4606 break;
4607
4608 case TARGET_F_SETOWN:
4609 case TARGET_F_GETOWN:
4610 case TARGET_F_SETSIG:
4611 case TARGET_F_GETSIG:
4612 case TARGET_F_SETLEASE:
4613 case TARGET_F_GETLEASE:
4614 ret = get_errno(fcntl(fd, host_cmd, arg));
4615 break;
4616
4617 default:
4618 ret = get_errno(fcntl(fd, cmd, arg));
4619 break;
4620 }
4621 return ret;
4622 }
4623
4624 #ifdef USE_UID16
4625
4626 static inline int high2lowuid(int uid)
4627 {
4628 if (uid > 65535)
4629 return 65534;
4630 else
4631 return uid;
4632 }
4633
4634 static inline int high2lowgid(int gid)
4635 {
4636 if (gid > 65535)
4637 return 65534;
4638 else
4639 return gid;
4640 }
4641
4642 static inline int low2highuid(int uid)
4643 {
4644 if ((int16_t)uid == -1)
4645 return -1;
4646 else
4647 return uid;
4648 }
4649
4650 static inline int low2highgid(int gid)
4651 {
4652 if ((int16_t)gid == -1)
4653 return -1;
4654 else
4655 return gid;
4656 }
4657 static inline int tswapid(int id)
4658 {
4659 return tswap16(id);
4660 }
4661 #else /* !USE_UID16 */
4662 static inline int high2lowuid(int uid)
4663 {
4664 return uid;
4665 }
4666 static inline int high2lowgid(int gid)
4667 {
4668 return gid;
4669 }
4670 static inline int low2highuid(int uid)
4671 {
4672 return uid;
4673 }
4674 static inline int low2highgid(int gid)
4675 {
4676 return gid;
4677 }
4678 static inline int tswapid(int id)
4679 {
4680 return tswap32(id);
4681 }
4682 #endif /* USE_UID16 */
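/*
 * Note on the USE_UID16 helpers above: targets whose uid_t/gid_t is only
 * 16 bits wide cannot represent ids above 65535, so larger host ids are
 * squashed to 65534, matching the kernel's default overflowuid/overflowgid
 * convention.  low2highuid()/low2highgid() preserve the 16-bit value -1
 * (0xffff), which the set*uid() family interprets as "leave this id
 * unchanged".
 */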
4683
4684 void syscall_init(void)
4685 {
4686 IOCTLEntry *ie;
4687 const argtype *arg_type;
4688 int size;
4689 int i;
4690
4691 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4692 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4693 #include "syscall_types.h"
4694 #undef STRUCT
4695 #undef STRUCT_SPECIAL
4696
4697 /* Build the target_to_host_errno_table[] from
4698 * host_to_target_errno_table[]. */
4699 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4700 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4701 }
4702
4703 /* We patch the ioctl size if necessary. We rely on the fact that
4704 no ioctl has all bits set to '1' in the size field. */
4705 ie = ioctl_entries;
4706 while (ie->target_cmd != 0) {
4707 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4708 TARGET_IOC_SIZEMASK) {
4709 arg_type = ie->arg_type;
4710 if (arg_type[0] != TYPE_PTR) {
4711 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4712 ie->target_cmd);
4713 exit(1);
4714 }
4715 arg_type++;
4716 size = thunk_type_size(arg_type, 0);
4717 ie->target_cmd = (ie->target_cmd &
4718 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4719 (size << TARGET_IOC_SIZESHIFT);
4720 }
4721
4722 /* automatic consistency check if same arch */
4723 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4724 (defined(__x86_64__) && defined(TARGET_X86_64))
4725 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4726 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4727 ie->name, ie->target_cmd, ie->host_cmd);
4728 }
4729 #endif
4730 ie++;
4731 }
4732 }
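/*
 * The size-patching loop above relies on the Linux ioctl number layout.
 * On most architectures an ioctl command packs, from bit 0 upward, an
 * 8-bit number, an 8-bit type, a 14-bit argument size and a 2-bit
 * direction (some targets use slightly different widths, which is why
 * TARGET_IOC_SIZESHIFT/TARGET_IOC_SIZEMASK are per-target macros).  A
 * table entry whose size field is all ones is a placeholder meaning
 * "size not known when the table was written"; it is replaced here by
 * thunk_type_size() of the pointed-to structure, conceptually:
 *
 *   before:  target_cmd = DIR | (TARGET_IOC_SIZEMASK << SIZESHIFT) | TYPE | NR
 *   after:   target_cmd = DIR | (sizeof(converted arg) << SIZESHIFT) | TYPE | NR
 */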
4733
4734 #if TARGET_ABI_BITS == 32
4735 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4736 {
4737 #ifdef TARGET_WORDS_BIGENDIAN
4738 return ((uint64_t)word0 << 32) | word1;
4739 #else
4740 return ((uint64_t)word1 << 32) | word0;
4741 #endif
4742 }
4743 #else /* TARGET_ABI_BITS == 32 */
4744 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4745 {
4746 return word0;
4747 }
4748 #endif /* TARGET_ABI_BITS != 32 */
4749
4750 #ifdef TARGET_NR_truncate64
4751 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4752 abi_long arg2,
4753 abi_long arg3,
4754 abi_long arg4)
4755 {
4756 if (regpairs_aligned(cpu_env)) {
4757 arg2 = arg3;
4758 arg3 = arg4;
4759 }
4760 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4761 }
4762 #endif
4763
4764 #ifdef TARGET_NR_ftruncate64
4765 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4766 abi_long arg2,
4767 abi_long arg3,
4768 abi_long arg4)
4769 {
4770 if (regpairs_aligned(cpu_env)) {
4771 arg2 = arg3;
4772 arg3 = arg4;
4773 }
4774 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4775 }
4776 #endif
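/*
 * 64-bit file offsets on 32-bit ABIs: the offset arrives split across two
 * syscall argument slots and is reassembled by target_offset64() in guest
 * word order (the first word is the low half on little-endian targets and
 * the high half on big-endian ones).  regpairs_aligned() is assumed to
 * report whether the target ABI requires 64-bit register pairs to start
 * on an even register; if so, a padding slot precedes the pair and the
 * real halves are shifted down by one argument.  For a hypothetical
 * little-endian 32-bit guest calling ftruncate64(fd, 0x100000000ULL):
 *
 *   without pair alignment:  arg2 = 0x00000000 (low), arg3 = 0x00000001 (high)
 *   with pair alignment:     arg2 = padding, arg3 = low half, arg4 = high half
 */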
4777
4778 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4779 abi_ulong target_addr)
4780 {
4781 struct target_timespec *target_ts;
4782
4783 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4784 return -TARGET_EFAULT;
4785 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4786 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4787 unlock_user_struct(target_ts, target_addr, 0);
4788 return 0;
4789 }
4790
4791 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4792 struct timespec *host_ts)
4793 {
4794 struct target_timespec *target_ts;
4795
4796 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4797 return -TARGET_EFAULT;
4798 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4799 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4800 unlock_user_struct(target_ts, target_addr, 1);
4801 return 0;
4802 }
4803
4804 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4805 static inline abi_long host_to_target_stat64(void *cpu_env,
4806 abi_ulong target_addr,
4807 struct stat *host_st)
4808 {
4809 #ifdef TARGET_ARM
4810 if (((CPUARMState *)cpu_env)->eabi) {
4811 struct target_eabi_stat64 *target_st;
4812
4813 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4814 return -TARGET_EFAULT;
4815 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4816 __put_user(host_st->st_dev, &target_st->st_dev);
4817 __put_user(host_st->st_ino, &target_st->st_ino);
4818 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4819 __put_user(host_st->st_ino, &target_st->__st_ino);
4820 #endif
4821 __put_user(host_st->st_mode, &target_st->st_mode);
4822 __put_user(host_st->st_nlink, &target_st->st_nlink);
4823 __put_user(host_st->st_uid, &target_st->st_uid);
4824 __put_user(host_st->st_gid, &target_st->st_gid);
4825 __put_user(host_st->st_rdev, &target_st->st_rdev);
4826 __put_user(host_st->st_size, &target_st->st_size);
4827 __put_user(host_st->st_blksize, &target_st->st_blksize);
4828 __put_user(host_st->st_blocks, &target_st->st_blocks);
4829 __put_user(host_st->st_atime, &target_st->target_st_atime);
4830 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4831 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4832 unlock_user_struct(target_st, target_addr, 1);
4833 } else
4834 #endif
4835 {
4836 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4837 struct target_stat *target_st;
4838 #else
4839 struct target_stat64 *target_st;
4840 #endif
4841
4842 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4843 return -TARGET_EFAULT;
4844 memset(target_st, 0, sizeof(*target_st));
4845 __put_user(host_st->st_dev, &target_st->st_dev);
4846 __put_user(host_st->st_ino, &target_st->st_ino);
4847 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4848 __put_user(host_st->st_ino, &target_st->__st_ino);
4849 #endif
4850 __put_user(host_st->st_mode, &target_st->st_mode);
4851 __put_user(host_st->st_nlink, &target_st->st_nlink);
4852 __put_user(host_st->st_uid, &target_st->st_uid);
4853 __put_user(host_st->st_gid, &target_st->st_gid);
4854 __put_user(host_st->st_rdev, &target_st->st_rdev);
4855 /* XXX: better use of kernel struct */
4856 __put_user(host_st->st_size, &target_st->st_size);
4857 __put_user(host_st->st_blksize, &target_st->st_blksize);
4858 __put_user(host_st->st_blocks, &target_st->st_blocks);
4859 __put_user(host_st->st_atime, &target_st->target_st_atime);
4860 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4861 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4862 unlock_user_struct(target_st, target_addr, 1);
4863 }
4864
4865 return 0;
4866 }
4867 #endif
4868
4869 #if defined(CONFIG_USE_NPTL)
4870 /* ??? Using host futex calls even when target atomic operations
4871 are not really atomic probably breaks things. However, implementing
4872 futexes locally would make futexes shared between multiple processes
4873 tricky. That said, they are probably useless anyway because guest atomic
4874 operations won't work either. */
4875 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4876 target_ulong uaddr2, int val3)
4877 {
4878 struct timespec ts, *pts;
4879 int base_op;
4880
4881 /* ??? We assume FUTEX_* constants are the same on both host
4882 and target. */
4883 #ifdef FUTEX_CMD_MASK
4884 base_op = op & FUTEX_CMD_MASK;
4885 #else
4886 base_op = op;
4887 #endif
4888 switch (base_op) {
4889 case FUTEX_WAIT:
4890 if (timeout) {
4891 pts = &ts;
4892 target_to_host_timespec(pts, timeout);
4893 } else {
4894 pts = NULL;
4895 }
4896 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4897 pts, NULL, 0));
4898 case FUTEX_WAKE:
4899 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4900 case FUTEX_FD:
4901 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4902 case FUTEX_REQUEUE:
4903 case FUTEX_CMP_REQUEUE:
4904 case FUTEX_WAKE_OP:
4905 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4906 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4907 But the prototype takes a `struct timespec *'; insert casts
4908 to satisfy the compiler. We do not need to tswap TIMEOUT
4909 since it's not compared to guest memory. */
4910 pts = (struct timespec *)(uintptr_t) timeout;
4911 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4912 g2h(uaddr2),
4913 (base_op == FUTEX_CMP_REQUEUE
4914 ? tswap32(val3)
4915 : val3)));
4916 default:
4917 return -TARGET_ENOSYS;
4918 }
4919 }
4920 #endif
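/*
 * Why only some futex arguments are byte-swapped above: for FUTEX_WAIT the
 * kernel compares the 32-bit word at uaddr against val, and uaddr points
 * into guest memory, which is in target byte order, so val is tswap32()ed
 * to match; the same applies to val3 for FUTEX_CMP_REQUEUE.  For
 * FUTEX_WAKE and similar operations val is just a count of waiters to
 * wake and needs no conversion.
 */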
4921
4922 /* Map host to target signal numbers for the wait family of syscalls.
4923 Assume all other status bits are the same. */
4924 int host_to_target_waitstatus(int status)
4925 {
4926 if (WIFSIGNALED(status)) {
4927 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4928 }
4929 if (WIFSTOPPED(status)) {
4930 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4931 | (status & 0xff);
4932 }
4933 return status;
4934 }
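/*
 * Reminder of the classic wait-status encoding being translated above:
 *
 *   exited normally:   exit code in bits 15-8, low byte 0
 *   killed by signal:  signal number in bits 6-0, bit 7 = core dumped
 *   stopped:           low byte 0x7f, stop signal in bits 15-8
 *
 * Only the embedded signal number needs host -> target translation; the
 * surrounding bits are assumed to use the same layout on both sides.
 */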
4935
4936 int get_osversion(void)
4937 {
4938 static int osversion;
4939 struct new_utsname buf;
4940 const char *s;
4941 int i, n, tmp;
4942 if (osversion)
4943 return osversion;
4944 if (qemu_uname_release && *qemu_uname_release) {
4945 s = qemu_uname_release;
4946 } else {
4947 if (sys_uname(&buf))
4948 return 0;
4949 s = buf.release;
4950 }
4951 tmp = 0;
4952 for (i = 0; i < 3; i++) {
4953 n = 0;
4954 while (*s >= '0' && *s <= '9') {
4955 n *= 10;
4956 n += *s - '0';
4957 s++;
4958 }
4959 tmp = (tmp << 8) + n;
4960 if (*s == '.')
4961 s++;
4962 }
4963 osversion = tmp;
4964 return osversion;
4965 }
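/*
 * Example: a release string of "2.6.32" (whether from the host uname or
 * from a user-supplied override) is packed one component per byte, giving
 * (2 << 16) | (6 << 8) | 32 = 0x020620.  A component larger than 255
 * would silently overflow into the neighbouring byte.
 */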
4966
4967
4968 static int open_self_maps(void *cpu_env, int fd)
4969 {
4970 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4971 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4972 #endif
4973 FILE *fp;
4974 char *line = NULL;
4975 size_t len = 0;
4976 ssize_t read;
4977
4978 fp = fopen("/proc/self/maps", "r");
4979 if (fp == NULL) {
4980 return -EACCES;
4981 }
4982
4983 while ((read = getline(&line, &len, fp)) != -1) {
4984 int fields, dev_maj, dev_min, inode;
4985 uint64_t min, max, offset;
4986 char flag_r, flag_w, flag_x, flag_p;
4987 char path[512] = "";
4988 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4989 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4990 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4991
4992 if ((fields < 10) || (fields > 11)) {
4993 continue;
4994 }
4995 if (!strncmp(path, "[stack]", 7)) {
4996 continue;
4997 }
4998 if (h2g_valid(min) && h2g_valid(max)) {
4999 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5000 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
5001 h2g(min), h2g(max), flag_r, flag_w,
5002 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5003 path[0] ? " " : "", path);
5004 }
5005 }
5006
5007 free(line);
5008 fclose(fp);
5009
5010 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5011 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5012 (unsigned long long)ts->info->stack_limit,
5013 (unsigned long long)(ts->info->start_stack +
5014 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
5015 (unsigned long long)0);
5016 #endif
5017
5018 return 0;
5019 }
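/*
 * open_self_maps() filters the host's /proc/self/maps down to ranges that
 * are valid guest addresses (h2g_valid) and rewrites them with h2g(), so
 * the guest sees its own address-space view rather than QEMU's.  The
 * host's "[stack]" line is skipped and, on the targets listed in the
 * #if above, a synthetic guest stack line is appended instead.
 */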
5020
5021 static int open_self_stat(void *cpu_env, int fd)
5022 {
5023 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5024 abi_ulong start_stack = ts->info->start_stack;
5025 int i;
5026
5027 for (i = 0; i < 44; i++) {
5028 char buf[128];
5029 int len;
5030 uint64_t val = 0;
5031
5032 if (i == 0) {
5033 /* pid */
5034 val = getpid();
5035 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5036 } else if (i == 1) {
5037 /* app name */
5038 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5039 } else if (i == 27) {
5040 /* stack bottom */
5041 val = start_stack;
5042 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5043 } else {
5044 /* for the rest, there is MasterCard: just report 0 */
5045 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5046 }
5047
5048 len = strlen(buf);
5049 if (write(fd, buf, len) != len) {
5050 return -1;
5051 }
5052 }
5053
5054 return 0;
5055 }
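/*
 * The field indices above follow proc(5): field 0 is the pid, field 1 the
 * command name in parentheses, and field 27 (the 28th field, "startstack")
 * is reported as the guest's initial stack address; every other field is
 * faked as 0.
 */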
5056
5057 static int open_self_auxv(void *cpu_env, int fd)
5058 {
5059 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5060 abi_ulong auxv = ts->info->saved_auxv;
5061 abi_ulong len = ts->info->auxv_len;
5062 char *ptr;
5063
5064 /*
5065 * The auxiliary vector is stored on the target process stack.
5066 * Read the whole auxv vector and copy it to the file.
5067 */
5068 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5069 if (ptr != NULL) {
5070 while (len > 0) {
5071 ssize_t r;
5072 r = write(fd, ptr, len);
5073 if (r <= 0) {
5074 break;
5075 }
5076 len -= r;
5077 ptr += r;
5078 }
5079 lseek(fd, 0, SEEK_SET);
5080 unlock_user(ptr, auxv, len);
5081 }
5082
5083 return 0;
5084 }
5085
5086 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5087 {
5088 struct fake_open {
5089 const char *filename;
5090 int (*fill)(void *cpu_env, int fd);
5091 };
5092 const struct fake_open *fake_open;
5093 static const struct fake_open fakes[] = {
5094 { "/proc/self/maps", open_self_maps },
5095 { "/proc/self/stat", open_self_stat },
5096 { "/proc/self/auxv", open_self_auxv },
5097 { NULL, NULL }
5098 };
5099
5100 for (fake_open = fakes; fake_open->filename; fake_open++) {
5101 if (!strncmp(pathname, fake_open->filename,
5102 strlen(fake_open->filename))) {
5103 break;
5104 }
5105 }
5106
5107 if (fake_open->filename) {
5108 const char *tmpdir;
5109 char filename[PATH_MAX];
5110 int fd, r;
5111
5112 /* create a temporary file to hold the generated /proc contents */
5113 tmpdir = getenv("TMPDIR");
5114 if (!tmpdir)
5115 tmpdir = "/tmp";
5116 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5117 fd = mkstemp(filename);
5118 if (fd < 0) {
5119 return fd;
5120 }
5121 unlink(filename);
5122
5123 if ((r = fake_open->fill(cpu_env, fd))) {
5124 close(fd);
5125 return r;
5126 }
5127 lseek(fd, 0, SEEK_SET);
5128
5129 return fd;
5130 }
5131
5132 return get_errno(open(path(pathname), flags, mode));
5133 }
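/*
 * The fake_open mechanism above exists because the real /proc/self of the
 * QEMU process describes host state (host mappings, host auxv).  For the
 * intercepted paths the generated guest view is written into an unlinked
 * mkstemp() file and that descriptor is returned, so subsequent read(),
 * lseek() and close() calls from the guest work on it like on any regular
 * file with no further special-casing.
 */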
5134
5135 /* do_syscall() should always have a single exit point at the end so
5136 that actions, such as logging of syscall results, can be performed.
5137 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5138 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5139 abi_long arg2, abi_long arg3, abi_long arg4,
5140 abi_long arg5, abi_long arg6, abi_long arg7,
5141 abi_long arg8)
5142 {
5143 abi_long ret;
5144 struct stat st;
5145 struct statfs stfs;
5146 void *p;
5147
5148 #ifdef DEBUG
5149 gemu_log("syscall %d", num);
5150 #endif
5151 if(do_strace)
5152 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5153
5154 switch(num) {
5155 case TARGET_NR_exit:
5156 #ifdef CONFIG_USE_NPTL
5157 /* In old applications this may be used to implement _exit(2).
5158 However, in threaded applications it is used for thread termination,
5159 and _exit_group is used for application termination.
5160 Do thread termination if we have more than one thread. */
5161 /* FIXME: This probably breaks if a signal arrives. We should probably
5162 be disabling signals. */
5163 if (first_cpu->next_cpu) {
5164 TaskState *ts;
5165 CPUArchState **lastp;
5166 CPUArchState *p;
5167
5168 cpu_list_lock();
5169 lastp = &first_cpu;
5170 p = first_cpu;
5171 while (p && p != (CPUArchState *)cpu_env) {
5172 lastp = &p->next_cpu;
5173 p = p->next_cpu;
5174 }
5175 /* If we didn't find the CPU for this thread then something is
5176 horribly wrong. */
5177 if (!p)
5178 abort();
5179 /* Remove the CPU from the list. */
5180 *lastp = p->next_cpu;
5181 cpu_list_unlock();
5182 ts = ((CPUArchState *)cpu_env)->opaque;
5183 if (ts->child_tidptr) {
5184 put_user_u32(0, ts->child_tidptr);
5185 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5186 NULL, NULL, 0);
5187 }
5188 thread_env = NULL;
5189 object_delete(OBJECT(ENV_GET_CPU(cpu_env)));
5190 g_free(ts);
5191 pthread_exit(NULL);
5192 }
5193 #endif
5194 #ifdef TARGET_GPROF
5195 _mcleanup();
5196 #endif
5197 gdb_exit(cpu_env, arg1);
5198 _exit(arg1);
5199 ret = 0; /* avoid warning */
5200 break;
5201 case TARGET_NR_read:
5202 if (arg3 == 0)
5203 ret = 0;
5204 else {
5205 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5206 goto efault;
5207 ret = get_errno(read(arg1, p, arg3));
5208 unlock_user(p, arg2, ret);
5209 }
5210 break;
5211 case TARGET_NR_write:
5212 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5213 goto efault;
5214 ret = get_errno(write(arg1, p, arg3));
5215 unlock_user(p, arg2, 0);
5216 break;
5217 case TARGET_NR_open:
5218 if (!(p = lock_user_string(arg1)))
5219 goto efault;
5220 ret = get_errno(do_open(cpu_env, p,
5221 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5222 arg3));
5223 unlock_user(p, arg1, 0);
5224 break;
5225 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5226 case TARGET_NR_openat:
5227 if (!(p = lock_user_string(arg2)))
5228 goto efault;
5229 ret = get_errno(sys_openat(arg1,
5230 path(p),
5231 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5232 arg4));
5233 unlock_user(p, arg2, 0);
5234 break;
5235 #endif
5236 case TARGET_NR_close:
5237 ret = get_errno(close(arg1));
5238 break;
5239 case TARGET_NR_brk:
5240 ret = do_brk(arg1);
5241 break;
5242 case TARGET_NR_fork:
5243 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5244 break;
5245 #ifdef TARGET_NR_waitpid
5246 case TARGET_NR_waitpid:
5247 {
5248 int status;
5249 ret = get_errno(waitpid(arg1, &status, arg3));
5250 if (!is_error(ret) && arg2 && ret
5251 && put_user_s32(host_to_target_waitstatus(status), arg2))
5252 goto efault;
5253 }
5254 break;
5255 #endif
5256 #ifdef TARGET_NR_waitid
5257 case TARGET_NR_waitid:
5258 {
5259 siginfo_t info;
5260 info.si_pid = 0;
5261 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5262 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5263 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5264 goto efault;
5265 host_to_target_siginfo(p, &info);
5266 unlock_user(p, arg3, sizeof(target_siginfo_t));
5267 }
5268 }
5269 break;
5270 #endif
5271 #ifdef TARGET_NR_creat /* not on alpha */
5272 case TARGET_NR_creat:
5273 if (!(p = lock_user_string(arg1)))
5274 goto efault;
5275 ret = get_errno(creat(p, arg2));
5276 unlock_user(p, arg1, 0);
5277 break;
5278 #endif
5279 case TARGET_NR_link:
5280 {
5281 void * p2;
5282 p = lock_user_string(arg1);
5283 p2 = lock_user_string(arg2);
5284 if (!p || !p2)
5285 ret = -TARGET_EFAULT;
5286 else
5287 ret = get_errno(link(p, p2));
5288 unlock_user(p2, arg2, 0);
5289 unlock_user(p, arg1, 0);
5290 }
5291 break;
5292 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5293 case TARGET_NR_linkat:
5294 {
5295 void * p2 = NULL;
5296 if (!arg2 || !arg4)
5297 goto efault;
5298 p = lock_user_string(arg2);
5299 p2 = lock_user_string(arg4);
5300 if (!p || !p2)
5301 ret = -TARGET_EFAULT;
5302 else
5303 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
5304 unlock_user(p, arg2, 0);
5305 unlock_user(p2, arg4, 0);
5306 }
5307 break;
5308 #endif
5309 case TARGET_NR_unlink:
5310 if (!(p = lock_user_string(arg1)))
5311 goto efault;
5312 ret = get_errno(unlink(p));
5313 unlock_user(p, arg1, 0);
5314 break;
5315 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5316 case TARGET_NR_unlinkat:
5317 if (!(p = lock_user_string(arg2)))
5318 goto efault;
5319 ret = get_errno(sys_unlinkat(arg1, p, arg3));
5320 unlock_user(p, arg2, 0);
5321 break;
5322 #endif
5323 case TARGET_NR_execve:
5324 {
5325 char **argp, **envp;
5326 int argc, envc;
5327 abi_ulong gp;
5328 abi_ulong guest_argp;
5329 abi_ulong guest_envp;
5330 abi_ulong addr;
5331 char **q;
5332 int total_size = 0;
5333
5334 argc = 0;
5335 guest_argp = arg2;
5336 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5337 if (get_user_ual(addr, gp))
5338 goto efault;
5339 if (!addr)
5340 break;
5341 argc++;
5342 }
5343 envc = 0;
5344 guest_envp = arg3;
5345 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5346 if (get_user_ual(addr, gp))
5347 goto efault;
5348 if (!addr)
5349 break;
5350 envc++;
5351 }
5352
5353 argp = alloca((argc + 1) * sizeof(void *));
5354 envp = alloca((envc + 1) * sizeof(void *));
5355
5356 for (gp = guest_argp, q = argp; gp;
5357 gp += sizeof(abi_ulong), q++) {
5358 if (get_user_ual(addr, gp))
5359 goto execve_efault;
5360 if (!addr)
5361 break;
5362 if (!(*q = lock_user_string(addr)))
5363 goto execve_efault;
5364 total_size += strlen(*q) + 1;
5365 }
5366 *q = NULL;
5367
5368 for (gp = guest_envp, q = envp; gp;
5369 gp += sizeof(abi_ulong), q++) {
5370 if (get_user_ual(addr, gp))
5371 goto execve_efault;
5372 if (!addr)
5373 break;
5374 if (!(*q = lock_user_string(addr)))
5375 goto execve_efault;
5376 total_size += strlen(*q) + 1;
5377 }
5378 *q = NULL;
5379
5380 /* This case will not be caught by the host's execve() if its
5381 page size is bigger than the target's. */
5382 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5383 ret = -TARGET_E2BIG;
5384 goto execve_end;
5385 }
5386 if (!(p = lock_user_string(arg1)))
5387 goto execve_efault;
5388 ret = get_errno(execve(p, argp, envp));
5389 unlock_user(p, arg1, 0);
5390
5391 goto execve_end;
5392
5393 execve_efault:
5394 ret = -TARGET_EFAULT;
5395
5396 execve_end:
5397 for (gp = guest_argp, q = argp; *q;
5398 gp += sizeof(abi_ulong), q++) {
5399 if (get_user_ual(addr, gp)
5400 || !addr)
5401 break;
5402 unlock_user(*q, addr, 0);
5403 }
5404 for (gp = guest_envp, q = envp; *q;
5405 gp += sizeof(abi_ulong), q++) {
5406 if (get_user_ual(addr, gp)
5407 || !addr)
5408 break;
5409 unlock_user(*q, addr, 0);
5410 }
5411 }
5412 break;
5413 case TARGET_NR_chdir:
5414 if (!(p = lock_user_string(arg1)))
5415 goto efault;
5416 ret = get_errno(chdir(p));
5417 unlock_user(p, arg1, 0);
5418 break;
5419 #ifdef TARGET_NR_time
5420 case TARGET_NR_time:
5421 {
5422 time_t host_time;
5423 ret = get_errno(time(&host_time));
5424 if (!is_error(ret)
5425 && arg1
5426 && put_user_sal(host_time, arg1))
5427 goto efault;
5428 }
5429 break;
5430 #endif
5431 case TARGET_NR_mknod:
5432 if (!(p = lock_user_string(arg1)))
5433 goto efault;
5434 ret = get_errno(mknod(p, arg2, arg3));
5435 unlock_user(p, arg1, 0);
5436 break;
5437 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5438 case TARGET_NR_mknodat:
5439 if (!(p = lock_user_string(arg2)))
5440 goto efault;
5441 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5442 unlock_user(p, arg2, 0);
5443 break;
5444 #endif
5445 case TARGET_NR_chmod:
5446 if (!(p = lock_user_string(arg1)))
5447 goto efault;
5448 ret = get_errno(chmod(p, arg2));
5449 unlock_user(p, arg1, 0);
5450 break;
5451 #ifdef TARGET_NR_break
5452 case TARGET_NR_break:
5453 goto unimplemented;
5454 #endif
5455 #ifdef TARGET_NR_oldstat
5456 case TARGET_NR_oldstat:
5457 goto unimplemented;
5458 #endif
5459 case TARGET_NR_lseek:
5460 ret = get_errno(lseek(arg1, arg2, arg3));
5461 break;
5462 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5463 /* Alpha specific */
5464 case TARGET_NR_getxpid:
5465 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5466 ret = get_errno(getpid());
5467 break;
5468 #endif
5469 #ifdef TARGET_NR_getpid
5470 case TARGET_NR_getpid:
5471 ret = get_errno(getpid());
5472 break;
5473 #endif
5474 case TARGET_NR_mount:
5475 {
5476 /* need to look at the data field */
5477 void *p2, *p3;
5478 p = lock_user_string(arg1);
5479 p2 = lock_user_string(arg2);
5480 p3 = lock_user_string(arg3);
5481 if (!p || !p2 || !p3)
5482 ret = -TARGET_EFAULT;
5483 else {
5484 /* FIXME - arg5 should be locked, but it isn't clear how to
5485 * do that since it's not guaranteed to be a NULL-terminated
5486 * string.
5487 */
5488 if ( ! arg5 )
5489 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5490 else
5491 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5492 }
5493 unlock_user(p, arg1, 0);
5494 unlock_user(p2, arg2, 0);
5495 unlock_user(p3, arg3, 0);
5496 break;
5497 }
5498 #ifdef TARGET_NR_umount
5499 case TARGET_NR_umount:
5500 if (!(p = lock_user_string(arg1)))
5501 goto efault;
5502 ret = get_errno(umount(p));
5503 unlock_user(p, arg1, 0);
5504 break;
5505 #endif
5506 #ifdef TARGET_NR_stime /* not on alpha */
5507 case TARGET_NR_stime:
5508 {
5509 time_t host_time;
5510 if (get_user_sal(host_time, arg1))
5511 goto efault;
5512 ret = get_errno(stime(&host_time));
5513 }
5514 break;
5515 #endif
5516 case TARGET_NR_ptrace:
5517 goto unimplemented;
5518 #ifdef TARGET_NR_alarm /* not on alpha */
5519 case TARGET_NR_alarm:
5520 ret = alarm(arg1);
5521 break;
5522 #endif
5523 #ifdef TARGET_NR_oldfstat
5524 case TARGET_NR_oldfstat:
5525 goto unimplemented;
5526 #endif
5527 #ifdef TARGET_NR_pause /* not on alpha */
5528 case TARGET_NR_pause:
5529 ret = get_errno(pause());
5530 break;
5531 #endif
5532 #ifdef TARGET_NR_utime
5533 case TARGET_NR_utime:
5534 {
5535 struct utimbuf tbuf, *host_tbuf;
5536 struct target_utimbuf *target_tbuf;
5537 if (arg2) {
5538 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5539 goto efault;
5540 tbuf.actime = tswapal(target_tbuf->actime);
5541 tbuf.modtime = tswapal(target_tbuf->modtime);
5542 unlock_user_struct(target_tbuf, arg2, 0);
5543 host_tbuf = &tbuf;
5544 } else {
5545 host_tbuf = NULL;
5546 }
5547 if (!(p = lock_user_string(arg1)))
5548 goto efault;
5549 ret = get_errno(utime(p, host_tbuf));
5550 unlock_user(p, arg1, 0);
5551 }
5552 break;
5553 #endif
5554 case TARGET_NR_utimes:
5555 {
5556 struct timeval *tvp, tv[2];
5557 if (arg2) {
5558 if (copy_from_user_timeval(&tv[0], arg2)
5559 || copy_from_user_timeval(&tv[1],
5560 arg2 + sizeof(struct target_timeval)))
5561 goto efault;
5562 tvp = tv;
5563 } else {
5564 tvp = NULL;
5565 }
5566 if (!(p = lock_user_string(arg1)))
5567 goto efault;
5568 ret = get_errno(utimes(p, tvp));
5569 unlock_user(p, arg1, 0);
5570 }
5571 break;
5572 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5573 case TARGET_NR_futimesat:
5574 {
5575 struct timeval *tvp, tv[2];
5576 if (arg3) {
5577 if (copy_from_user_timeval(&tv[0], arg3)
5578 || copy_from_user_timeval(&tv[1],
5579 arg3 + sizeof(struct target_timeval)))
5580 goto efault;
5581 tvp = tv;
5582 } else {
5583 tvp = NULL;
5584 }
5585 if (!(p = lock_user_string(arg2)))
5586 goto efault;
5587 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5588 unlock_user(p, arg2, 0);
5589 }
5590 break;
5591 #endif
5592 #ifdef TARGET_NR_stty
5593 case TARGET_NR_stty:
5594 goto unimplemented;
5595 #endif
5596 #ifdef TARGET_NR_gtty
5597 case TARGET_NR_gtty:
5598 goto unimplemented;
5599 #endif
5600 case TARGET_NR_access:
5601 if (!(p = lock_user_string(arg1)))
5602 goto efault;
5603 ret = get_errno(access(path(p), arg2));
5604 unlock_user(p, arg1, 0);
5605 break;
5606 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5607 case TARGET_NR_faccessat:
5608 if (!(p = lock_user_string(arg2)))
5609 goto efault;
5610 ret = get_errno(sys_faccessat(arg1, p, arg3));
5611 unlock_user(p, arg2, 0);
5612 break;
5613 #endif
5614 #ifdef TARGET_NR_nice /* not on alpha */
5615 case TARGET_NR_nice:
5616 ret = get_errno(nice(arg1));
5617 break;
5618 #endif
5619 #ifdef TARGET_NR_ftime
5620 case TARGET_NR_ftime:
5621 goto unimplemented;
5622 #endif
5623 case TARGET_NR_sync:
5624 sync();
5625 ret = 0;
5626 break;
5627 case TARGET_NR_kill:
5628 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5629 break;
5630 case TARGET_NR_rename:
5631 {
5632 void *p2;
5633 p = lock_user_string(arg1);
5634 p2 = lock_user_string(arg2);
5635 if (!p || !p2)
5636 ret = -TARGET_EFAULT;
5637 else
5638 ret = get_errno(rename(p, p2));
5639 unlock_user(p2, arg2, 0);
5640 unlock_user(p, arg1, 0);
5641 }
5642 break;
5643 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5644 case TARGET_NR_renameat:
5645 {
5646 void *p2;
5647 p = lock_user_string(arg2);
5648 p2 = lock_user_string(arg4);
5649 if (!p || !p2)
5650 ret = -TARGET_EFAULT;
5651 else
5652 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5653 unlock_user(p2, arg4, 0);
5654 unlock_user(p, arg2, 0);
5655 }
5656 break;
5657 #endif
5658 case TARGET_NR_mkdir:
5659 if (!(p = lock_user_string(arg1)))
5660 goto efault;
5661 ret = get_errno(mkdir(p, arg2));
5662 unlock_user(p, arg1, 0);
5663 break;
5664 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5665 case TARGET_NR_mkdirat:
5666 if (!(p = lock_user_string(arg2)))
5667 goto efault;
5668 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5669 unlock_user(p, arg2, 0);
5670 break;
5671 #endif
5672 case TARGET_NR_rmdir:
5673 if (!(p = lock_user_string(arg1)))
5674 goto efault;
5675 ret = get_errno(rmdir(p));
5676 unlock_user(p, arg1, 0);
5677 break;
5678 case TARGET_NR_dup:
5679 ret = get_errno(dup(arg1));
5680 break;
5681 case TARGET_NR_pipe:
5682 ret = do_pipe(cpu_env, arg1, 0, 0);
5683 break;
5684 #ifdef TARGET_NR_pipe2
5685 case TARGET_NR_pipe2:
5686 ret = do_pipe(cpu_env, arg1,
5687 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5688 break;
5689 #endif
5690 case TARGET_NR_times:
5691 {
5692 struct target_tms *tmsp;
5693 struct tms tms;
5694 ret = get_errno(times(&tms));
5695 if (arg1) {
5696 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5697 if (!tmsp)
5698 goto efault;
5699 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5700 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5701 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5702 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5703 }
5704 if (!is_error(ret))
5705 ret = host_to_target_clock_t(ret);
5706 }
5707 break;
5708 #ifdef TARGET_NR_prof
5709 case TARGET_NR_prof:
5710 goto unimplemented;
5711 #endif
5712 #ifdef TARGET_NR_signal
5713 case TARGET_NR_signal:
5714 goto unimplemented;
5715 #endif
5716 case TARGET_NR_acct:
5717 if (arg1 == 0) {
5718 ret = get_errno(acct(NULL));
5719 } else {
5720 if (!(p = lock_user_string(arg1)))
5721 goto efault;
5722 ret = get_errno(acct(path(p)));
5723 unlock_user(p, arg1, 0);
5724 }
5725 break;
5726 #ifdef TARGET_NR_umount2 /* not on alpha */
5727 case TARGET_NR_umount2:
5728 if (!(p = lock_user_string(arg1)))
5729 goto efault;
5730 ret = get_errno(umount2(p, arg2));
5731 unlock_user(p, arg1, 0);
5732 break;
5733 #endif
5734 #ifdef TARGET_NR_lock
5735 case TARGET_NR_lock:
5736 goto unimplemented;
5737 #endif
5738 case TARGET_NR_ioctl:
5739 ret = do_ioctl(arg1, arg2, arg3);
5740 break;
5741 case TARGET_NR_fcntl:
5742 ret = do_fcntl(arg1, arg2, arg3);
5743 break;
5744 #ifdef TARGET_NR_mpx
5745 case TARGET_NR_mpx:
5746 goto unimplemented;
5747 #endif
5748 case TARGET_NR_setpgid:
5749 ret = get_errno(setpgid(arg1, arg2));
5750 break;
5751 #ifdef TARGET_NR_ulimit
5752 case TARGET_NR_ulimit:
5753 goto unimplemented;
5754 #endif
5755 #ifdef TARGET_NR_oldolduname
5756 case TARGET_NR_oldolduname:
5757 goto unimplemented;
5758 #endif
5759 case TARGET_NR_umask:
5760 ret = get_errno(umask(arg1));
5761 break;
5762 case TARGET_NR_chroot:
5763 if (!(p = lock_user_string(arg1)))
5764 goto efault;
5765 ret = get_errno(chroot(p));
5766 unlock_user(p, arg1, 0);
5767 break;
5768 case TARGET_NR_ustat:
5769 goto unimplemented;
5770 case TARGET_NR_dup2:
5771 ret = get_errno(dup2(arg1, arg2));
5772 break;
5773 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5774 case TARGET_NR_dup3:
5775 ret = get_errno(dup3(arg1, arg2, arg3));
5776 break;
5777 #endif
5778 #ifdef TARGET_NR_getppid /* not on alpha */
5779 case TARGET_NR_getppid:
5780 ret = get_errno(getppid());
5781 break;
5782 #endif
5783 case TARGET_NR_getpgrp:
5784 ret = get_errno(getpgrp());
5785 break;
5786 case TARGET_NR_setsid:
5787 ret = get_errno(setsid());
5788 break;
5789 #ifdef TARGET_NR_sigaction
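/* The old sigaction structure differs per target: Alpha has no
   sa_restorer and hands back the previous mask as a single word, MIPS
   uses the full target_sigaction layout with a multi-word sa_mask, and
   the generic path carries sa_restorer through unchanged.  Each branch
   below converts between that layout and struct target_sigaction. */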
5790 case TARGET_NR_sigaction:
5791 {
5792 #if defined(TARGET_ALPHA)
5793 struct target_sigaction act, oact, *pact = 0;
5794 struct target_old_sigaction *old_act;
5795 if (arg2) {
5796 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5797 goto efault;
5798 act._sa_handler = old_act->_sa_handler;
5799 target_siginitset(&act.sa_mask, old_act->sa_mask);
5800 act.sa_flags = old_act->sa_flags;
5801 act.sa_restorer = 0;
5802 unlock_user_struct(old_act, arg2, 0);
5803 pact = &act;
5804 }
5805 ret = get_errno(do_sigaction(arg1, pact, &oact));
5806 if (!is_error(ret) && arg3) {
5807 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5808 goto efault;
5809 old_act->_sa_handler = oact._sa_handler;
5810 old_act->sa_mask = oact.sa_mask.sig[0];
5811 old_act->sa_flags = oact.sa_flags;
5812 unlock_user_struct(old_act, arg3, 1);
5813 }
5814 #elif defined(TARGET_MIPS)
5815 struct target_sigaction act, oact, *pact, *old_act;
5816
5817 if (arg2) {
5818 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5819 goto efault;
5820 act._sa_handler = old_act->_sa_handler;
5821 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5822 act.sa_flags = old_act->sa_flags;
5823 unlock_user_struct(old_act, arg2, 0);
5824 pact = &act;
5825 } else {
5826 pact = NULL;
5827 }
5828
5829 ret = get_errno(do_sigaction(arg1, pact, &oact));
5830
5831 if (!is_error(ret) && arg3) {
5832 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5833 goto efault;
5834 old_act->_sa_handler = oact._sa_handler;
5835 old_act->sa_flags = oact.sa_flags;
5836 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5837 old_act->sa_mask.sig[1] = 0;
5838 old_act->sa_mask.sig[2] = 0;
5839 old_act->sa_mask.sig[3] = 0;
5840 unlock_user_struct(old_act, arg3, 1);
5841 }
5842 #else
5843 struct target_old_sigaction *old_act;
5844 struct target_sigaction act, oact, *pact;
5845 if (arg2) {
5846 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5847 goto efault;
5848 act._sa_handler = old_act->_sa_handler;
5849 target_siginitset(&act.sa_mask, old_act->sa_mask);
5850 act.sa_flags = old_act->sa_flags;
5851 act.sa_restorer = old_act->sa_restorer;
5852 unlock_user_struct(old_act, arg2, 0);
5853 pact = &act;
5854 } else {
5855 pact = NULL;
5856 }
5857 ret = get_errno(do_sigaction(arg1, pact, &oact));
5858 if (!is_error(ret) && arg3) {
5859 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5860 goto efault;
5861 old_act->_sa_handler = oact._sa_handler;
5862 old_act->sa_mask = oact.sa_mask.sig[0];
5863 old_act->sa_flags = oact.sa_flags;
5864 old_act->sa_restorer = oact.sa_restorer;
5865 unlock_user_struct(old_act, arg3, 1);
5866 }
5867 #endif
5868 }
5869 break;
5870 #endif
5871 case TARGET_NR_rt_sigaction:
5872 {
5873 #if defined(TARGET_ALPHA)
5874 struct target_sigaction act, oact, *pact = 0;
5875 struct target_rt_sigaction *rt_act;
5876 /* ??? arg4 == sizeof(sigset_t). */
5877 if (arg2) {
5878 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5879 goto efault;
5880 act._sa_handler = rt_act->_sa_handler;
5881 act.sa_mask = rt_act->sa_mask;
5882 act.sa_flags = rt_act->sa_flags;
5883 act.sa_restorer = arg5;
5884 unlock_user_struct(rt_act, arg2, 0);
5885 pact = &act;
5886 }
5887 ret = get_errno(do_sigaction(arg1, pact, &oact));
5888 if (!is_error(ret) && arg3) {
5889 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5890 goto efault;
5891 rt_act->_sa_handler = oact._sa_handler;
5892 rt_act->sa_mask = oact.sa_mask;
5893 rt_act->sa_flags = oact.sa_flags;
5894 unlock_user_struct(rt_act, arg3, 1);
5895 }
5896 #else
5897 struct target_sigaction *act;
5898 struct target_sigaction *oact;
5899
5900 if (arg2) {
5901 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5902 goto efault;
5903 } else
5904 act = NULL;
5905 if (arg3) {
5906 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5907 ret = -TARGET_EFAULT;
5908 goto rt_sigaction_fail;
5909 }
5910 } else
5911 oact = NULL;
5912 ret = get_errno(do_sigaction(arg1, act, oact));
5913 rt_sigaction_fail:
5914 if (act)
5915 unlock_user_struct(act, arg2, 0);
5916 if (oact)
5917 unlock_user_struct(oact, arg3, 1);
5918 #endif
5919 }
5920 break;
5921 #ifdef TARGET_NR_sgetmask /* not on alpha */
5922 case TARGET_NR_sgetmask:
5923 {
5924 sigset_t cur_set;
5925 abi_ulong target_set;
5926 sigprocmask(0, NULL, &cur_set);
5927 host_to_target_old_sigset(&target_set, &cur_set);
5928 ret = target_set;
5929 }
5930 break;
5931 #endif
5932 #ifdef TARGET_NR_ssetmask /* not on alpha */
5933 case TARGET_NR_ssetmask:
5934 {
5935 sigset_t set, oset, cur_set;
5936 abi_ulong target_set = arg1;
5937 sigprocmask(0, NULL, &cur_set);
5938 target_to_host_old_sigset(&set, &target_set);
5939 sigorset(&set, &set, &cur_set);
5940 sigprocmask(SIG_SETMASK, &set, &oset);
5941 host_to_target_old_sigset(&target_set, &oset);
5942 ret = target_set;
5943 }
5944 break;
5945 #endif
5946 #ifdef TARGET_NR_sigprocmask
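/* On Alpha the old sigprocmask takes the mask by value and returns the
   previous mask as the syscall result (v0 is cleared separately to
   signal success), so it needs its own path; the other targets pass
   the signal sets through guest memory instead. */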
5947 case TARGET_NR_sigprocmask:
5948 {
5949 #if defined(TARGET_ALPHA)
5950 sigset_t set, oldset;
5951 abi_ulong mask;
5952 int how;
5953
5954 switch (arg1) {
5955 case TARGET_SIG_BLOCK:
5956 how = SIG_BLOCK;
5957 break;
5958 case TARGET_SIG_UNBLOCK:
5959 how = SIG_UNBLOCK;
5960 break;
5961 case TARGET_SIG_SETMASK:
5962 how = SIG_SETMASK;
5963 break;
5964 default:
5965 ret = -TARGET_EINVAL;
5966 goto fail;
5967 }
5968 mask = arg2;
5969 target_to_host_old_sigset(&set, &mask);
5970
5971 ret = get_errno(sigprocmask(how, &set, &oldset));
5972 if (!is_error(ret)) {
5973 host_to_target_old_sigset(&mask, &oldset);
5974 ret = mask;
5975 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5976 }
5977 #else
5978 sigset_t set, oldset, *set_ptr;
5979 int how;
5980
5981 if (arg2) {
5982 switch (arg1) {
5983 case TARGET_SIG_BLOCK:
5984 how = SIG_BLOCK;
5985 break;
5986 case TARGET_SIG_UNBLOCK:
5987 how = SIG_UNBLOCK;
5988 break;
5989 case TARGET_SIG_SETMASK:
5990 how = SIG_SETMASK;
5991 break;
5992 default:
5993 ret = -TARGET_EINVAL;
5994 goto fail;
5995 }
5996 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5997 goto efault;
5998 target_to_host_old_sigset(&set, p);
5999 unlock_user(p, arg2, 0);
6000 set_ptr = &set;
6001 } else {
6002 how = 0;
6003 set_ptr = NULL;
6004 }
6005 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6006 if (!is_error(ret) && arg3) {
6007 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6008 goto efault;
6009 host_to_target_old_sigset(p, &oldset);
6010 unlock_user(p, arg3, sizeof(target_sigset_t));
6011 }
6012 #endif
6013 }
6014 break;
6015 #endif
6016 case TARGET_NR_rt_sigprocmask:
6017 {
6018 int how = arg1;
6019 sigset_t set, oldset, *set_ptr;
6020
6021 if (arg2) {
6022 switch(how) {
6023 case TARGET_SIG_BLOCK:
6024 how = SIG_BLOCK;
6025 break;
6026 case TARGET_SIG_UNBLOCK:
6027 how = SIG_UNBLOCK;
6028 break;
6029 case TARGET_SIG_SETMASK:
6030 how = SIG_SETMASK;
6031 break;
6032 default:
6033 ret = -TARGET_EINVAL;
6034 goto fail;
6035 }
6036 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6037 goto efault;
6038 target_to_host_sigset(&set, p);
6039 unlock_user(p, arg2, 0);
6040 set_ptr = &set;
6041 } else {
6042 how = 0;
6043 set_ptr = NULL;
6044 }
6045 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6046 if (!is_error(ret) && arg3) {
6047 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6048 goto efault;
6049 host_to_target_sigset(p, &oldset);
6050 unlock_user(p, arg3, sizeof(target_sigset_t));
6051 }
6052 }
6053 break;
6054 #ifdef TARGET_NR_sigpending
6055 case TARGET_NR_sigpending:
6056 {
6057 sigset_t set;
6058 ret = get_errno(sigpending(&set));
6059 if (!is_error(ret)) {
6060 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6061 goto efault;
6062 host_to_target_old_sigset(p, &set);
6063 unlock_user(p, arg1, sizeof(target_sigset_t));
6064 }
6065 }
6066 break;
6067 #endif
6068 case TARGET_NR_rt_sigpending:
6069 {
6070 sigset_t set;
6071 ret = get_errno(sigpending(&set));
6072 if (!is_error(ret)) {
6073 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6074 goto efault;
6075 host_to_target_sigset(p, &set);
6076 unlock_user(p, arg1, sizeof(target_sigset_t));
6077 }
6078 }
6079 break;
6080 #ifdef TARGET_NR_sigsuspend
6081 case TARGET_NR_sigsuspend:
6082 {
6083 sigset_t set;
6084 #if defined(TARGET_ALPHA)
6085 abi_ulong mask = arg1;
6086 target_to_host_old_sigset(&set, &mask);
6087 #else
6088 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6089 goto efault;
6090 target_to_host_old_sigset(&set, p);
6091 unlock_user(p, arg1, 0);
6092 #endif
6093 ret = get_errno(sigsuspend(&set));
6094 }
6095 break;
6096 #endif
6097 case TARGET_NR_rt_sigsuspend:
6098 {
6099 sigset_t set;
6100 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6101 goto efault;
6102 target_to_host_sigset(&set, p);
6103 unlock_user(p, arg1, 0);
6104 ret = get_errno(sigsuspend(&set));
6105 }
6106 break;
6107 case TARGET_NR_rt_sigtimedwait:
6108 {
6109 sigset_t set;
6110 struct timespec uts, *puts;
6111 siginfo_t uinfo;
6112
6113 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6114 goto efault;
6115 target_to_host_sigset(&set, p);
6116 unlock_user(p, arg1, 0);
6117 if (arg3) {
6118 puts = &uts;
6119 target_to_host_timespec(puts, arg3);
6120 } else {
6121 puts = NULL;
6122 }
6123 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6124 if (!is_error(ret) && arg2) {
6125 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6126 goto efault;
6127 host_to_target_siginfo(p, &uinfo);
6128 unlock_user(p, arg2, sizeof(target_siginfo_t));
6129 }
6130 }
6131 break;
6132 case TARGET_NR_rt_sigqueueinfo:
6133 {
6134 siginfo_t uinfo;
6135 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6136 goto efault;
6137 target_to_host_siginfo(&uinfo, p);
6138 unlock_user(p, arg3, 0);
6139 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6140 }
6141 break;
6142 #ifdef TARGET_NR_sigreturn
6143 case TARGET_NR_sigreturn:
6144 /* NOTE: ret is eax, so no transcoding needs to be done */
6145 ret = do_sigreturn(cpu_env);
6146 break;
6147 #endif
6148 case TARGET_NR_rt_sigreturn:
6149 /* NOTE: ret is eax, so no transcoding needs to be done */
6150 ret = do_rt_sigreturn(cpu_env);
6151 break;
6152 case TARGET_NR_sethostname:
6153 if (!(p = lock_user_string(arg1)))
6154 goto efault;
6155 ret = get_errno(sethostname(p, arg2));
6156 unlock_user(p, arg1, 0);
6157 break;
6158 case TARGET_NR_setrlimit:
6159 {
6160 int resource = target_to_host_resource(arg1);
6161 struct target_rlimit *target_rlim;
6162 struct rlimit rlim;
6163 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6164 goto efault;
6165 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6166 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6167 unlock_user_struct(target_rlim, arg2, 0);
6168 ret = get_errno(setrlimit(resource, &rlim));
6169 }
6170 break;
6171 case TARGET_NR_getrlimit:
6172 {
6173 int resource = target_to_host_resource(arg1);
6174 struct target_rlimit *target_rlim;
6175 struct rlimit rlim;
6176
6177 ret = get_errno(getrlimit(resource, &rlim));
6178 if (!is_error(ret)) {
6179 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6180 goto efault;
6181 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6182 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6183 unlock_user_struct(target_rlim, arg2, 1);
6184 }
6185 }
6186 break;
6187 case TARGET_NR_getrusage:
6188 {
6189 struct rusage rusage;
6190 ret = get_errno(getrusage(arg1, &rusage));
6191 if (!is_error(ret)) {
6192 host_to_target_rusage(arg2, &rusage);
6193 }
6194 }
6195 break;
6196 case TARGET_NR_gettimeofday:
6197 {
6198 struct timeval tv;
6199 ret = get_errno(gettimeofday(&tv, NULL));
6200 if (!is_error(ret)) {
6201 if (copy_to_user_timeval(arg1, &tv))
6202 goto efault;
6203 }
6204 }
6205 break;
6206 case TARGET_NR_settimeofday:
6207 {
6208 struct timeval tv;
6209 if (copy_from_user_timeval(&tv, arg1))
6210 goto efault;
6211 ret = get_errno(settimeofday(&tv, NULL));
6212 }
6213 break;
6214 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
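/* The old select syscall passes a single pointer to a block holding
   nfds, the three fd_set addresses and the timeval address; the fields
   are byte-swapped out of guest memory and handed to do_select. */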
6215 case TARGET_NR_select:
6216 {
6217 struct target_sel_arg_struct *sel;
6218 abi_ulong inp, outp, exp, tvp;
6219 long nsel;
6220
6221 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6222 goto efault;
6223 nsel = tswapal(sel->n);
6224 inp = tswapal(sel->inp);
6225 outp = tswapal(sel->outp);
6226 exp = tswapal(sel->exp);
6227 tvp = tswapal(sel->tvp);
6228 unlock_user_struct(sel, arg1, 0);
6229 ret = do_select(nsel, inp, outp, exp, tvp);
6230 }
6231 break;
6232 #endif
6233 #ifdef TARGET_NR_pselect6
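/* pselect6 packs the sigset pointer and its size into a two-word block
   whose address arrives as the sixth argument; an illustrative guest
   view of that block (an assumption for clarity, not a type defined in
   this file) would be:
       struct { abi_ulong sigset_addr; abi_ulong sigset_size; };
   The code below reads the two words, checks the size against
   sizeof(target_sigset_t) and converts the sigset to host format. */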
6234 case TARGET_NR_pselect6:
6235 {
6236 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6237 fd_set rfds, wfds, efds;
6238 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6239 struct timespec ts, *ts_ptr;
6240
6241 /*
6242 * The 6th arg is actually two args smashed together,
6243 * so we cannot use the C library.
6244 */
6245 sigset_t set;
6246 struct {
6247 sigset_t *set;
6248 size_t size;
6249 } sig, *sig_ptr;
6250
6251 abi_ulong arg_sigset, arg_sigsize, *arg7;
6252 target_sigset_t *target_sigset;
6253
6254 n = arg1;
6255 rfd_addr = arg2;
6256 wfd_addr = arg3;
6257 efd_addr = arg4;
6258 ts_addr = arg5;
6259
6260 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6261 if (ret) {
6262 goto fail;
6263 }
6264 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6265 if (ret) {
6266 goto fail;
6267 }
6268 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6269 if (ret) {
6270 goto fail;
6271 }
6272
6273 /*
6274 * This takes a timespec, and not a timeval, so we cannot
6275 * use the do_select() helper ...
6276 */
6277 if (ts_addr) {
6278 if (target_to_host_timespec(&ts, ts_addr)) {
6279 goto efault;
6280 }
6281 ts_ptr = &ts;
6282 } else {
6283 ts_ptr = NULL;
6284 }
6285
6286 /* Extract the two packed args for the sigset */
6287 if (arg6) {
6288 sig_ptr = &sig;
6289 sig.size = _NSIG / 8;
6290
6291 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6292 if (!arg7) {
6293 goto efault;
6294 }
6295 arg_sigset = tswapal(arg7[0]);
6296 arg_sigsize = tswapal(arg7[1]);
6297 unlock_user(arg7, arg6, 0);
6298
6299 if (arg_sigset) {
6300 sig.set = &set;
6301 if (arg_sigsize != sizeof(*target_sigset)) {
6302 /* Like the kernel, we enforce correct size sigsets */
6303 ret = -TARGET_EINVAL;
6304 goto fail;
6305 }
6306 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6307 sizeof(*target_sigset), 1);
6308 if (!target_sigset) {
6309 goto efault;
6310 }
6311 target_to_host_sigset(&set, target_sigset);
6312 unlock_user(target_sigset, arg_sigset, 0);
6313 } else {
6314 sig.set = NULL;
6315 }
6316 } else {
6317 sig_ptr = NULL;
6318 }
6319
6320 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6321 ts_ptr, sig_ptr));
6322
6323 if (!is_error(ret)) {
6324 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6325 goto efault;
6326 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6327 goto efault;
6328 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6329 goto efault;
6330
6331 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6332 goto efault;
6333 }
6334 }
6335 break;
6336 #endif
6337 case TARGET_NR_symlink:
6338 {
6339 void *p2;
6340 p = lock_user_string(arg1);
6341 p2 = lock_user_string(arg2);
6342 if (!p || !p2)
6343 ret = -TARGET_EFAULT;
6344 else
6345 ret = get_errno(symlink(p, p2));
6346 unlock_user(p2, arg2, 0);
6347 unlock_user(p, arg1, 0);
6348 }
6349 break;
6350 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6351 case TARGET_NR_symlinkat:
6352 {
6353 void *p2;
6354 p = lock_user_string(arg1);
6355 p2 = lock_user_string(arg3);
6356 if (!p || !p2)
6357 ret = -TARGET_EFAULT;
6358 else
6359 ret = get_errno(sys_symlinkat(p, arg2, p2));
6360 unlock_user(p2, arg3, 0);
6361 unlock_user(p, arg1, 0);
6362 }
6363 break;
6364 #endif
6365 #ifdef TARGET_NR_oldlstat
6366 case TARGET_NR_oldlstat:
6367 goto unimplemented;
6368 #endif
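/* readlink of "/proc/self/exe" is special-cased so the guest sees the
   resolved path of the binary being emulated (exec_path) rather than
   the path of the qemu executable itself. */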
6369 case TARGET_NR_readlink:
6370 {
6371 void *p2, *temp;
6372 p = lock_user_string(arg1);
6373 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6374 if (!p || !p2)
6375 ret = -TARGET_EFAULT;
6376 else {
6377 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
6378 char real[PATH_MAX];
6379 temp = realpath(exec_path,real);
6380 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
6381 snprintf((char *)p2, arg3, "%s", real);
6382 }
6383 else
6384 ret = get_errno(readlink(path(p), p2, arg3));
6385 }
6386 unlock_user(p2, arg2, ret);
6387 unlock_user(p, arg1, 0);
6388 }
6389 break;
6390 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6391 case TARGET_NR_readlinkat:
6392 {
6393 void *p2;
6394 p = lock_user_string(arg2);
6395 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6396 if (!p || !p2)
6397 ret = -TARGET_EFAULT;
6398 else
6399 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6400 unlock_user(p2, arg3, ret);
6401 unlock_user(p, arg2, 0);
6402 }
6403 break;
6404 #endif
6405 #ifdef TARGET_NR_uselib
6406 case TARGET_NR_uselib:
6407 goto unimplemented;
6408 #endif
6409 #ifdef TARGET_NR_swapon
6410 case TARGET_NR_swapon:
6411 if (!(p = lock_user_string(arg1)))
6412 goto efault;
6413 ret = get_errno(swapon(p, arg2));
6414 unlock_user(p, arg1, 0);
6415 break;
6416 #endif
6417 case TARGET_NR_reboot:
6418 if (!(p = lock_user_string(arg4)))
6419 goto efault;
6420 ret = get_errno(reboot(arg1, arg2, arg3, p));
6421 unlock_user(p, arg4, 0);
6422 break;
6423 #ifdef TARGET_NR_readdir
6424 case TARGET_NR_readdir:
6425 goto unimplemented;
6426 #endif
6427 #ifdef TARGET_NR_mmap
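/* On several 32-bit targets the old mmap syscall passes a pointer to a
   block of six arguments in guest memory rather than using registers;
   that block is unpacked below before calling target_mmap. */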
6428 case TARGET_NR_mmap:
6429 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6430 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6431 || defined(TARGET_S390X)
6432 {
6433 abi_ulong *v;
6434 abi_ulong v1, v2, v3, v4, v5, v6;
6435 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6436 goto efault;
6437 v1 = tswapal(v[0]);
6438 v2 = tswapal(v[1]);
6439 v3 = tswapal(v[2]);
6440 v4 = tswapal(v[3]);
6441 v5 = tswapal(v[4]);
6442 v6 = tswapal(v[5]);
6443 unlock_user(v, arg1, 0);
6444 ret = get_errno(target_mmap(v1, v2, v3,
6445 target_to_host_bitmask(v4, mmap_flags_tbl),
6446 v5, v6));
6447 }
6448 #else
6449 ret = get_errno(target_mmap(arg1, arg2, arg3,
6450 target_to_host_bitmask(arg4, mmap_flags_tbl),
6451 arg5,
6452 arg6));
6453 #endif
6454 break;
6455 #endif
6456 #ifdef TARGET_NR_mmap2
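/* mmap2 passes the file offset in units of 1 << MMAP_SHIFT bytes
   (4096 unless the target overrides MMAP_SHIFT), so arg6 is shifted up
   before being handed to target_mmap. */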
6457 case TARGET_NR_mmap2:
6458 #ifndef MMAP_SHIFT
6459 #define MMAP_SHIFT 12
6460 #endif
6461 ret = get_errno(target_mmap(arg1, arg2, arg3,
6462 target_to_host_bitmask(arg4, mmap_flags_tbl),
6463 arg5,
6464 arg6 << MMAP_SHIFT));
6465 break;
6466 #endif
6467 case TARGET_NR_munmap:
6468 ret = get_errno(target_munmap(arg1, arg2));
6469 break;
6470 case TARGET_NR_mprotect:
6471 {
6472 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6473 /* Special hack to detect libc making the stack executable. */
6474 if ((arg3 & PROT_GROWSDOWN)
6475 && arg1 >= ts->info->stack_limit
6476 && arg1 <= ts->info->start_stack) {
6477 arg3 &= ~PROT_GROWSDOWN;
6478 arg2 = arg2 + arg1 - ts->info->stack_limit;
6479 arg1 = ts->info->stack_limit;
6480 }
6481 }
6482 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6483 break;
6484 #ifdef TARGET_NR_mremap
6485 case TARGET_NR_mremap:
6486 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6487 break;
6488 #endif
6489 /* ??? msync/mlock/munlock are broken for softmmu. */
6490 #ifdef TARGET_NR_msync
6491 case TARGET_NR_msync:
6492 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6493 break;
6494 #endif
6495 #ifdef TARGET_NR_mlock
6496 case TARGET_NR_mlock:
6497 ret = get_errno(mlock(g2h(arg1), arg2));
6498 break;
6499 #endif
6500 #ifdef TARGET_NR_munlock
6501 case TARGET_NR_munlock:
6502 ret = get_errno(munlock(g2h(arg1), arg2));
6503 break;
6504 #endif
6505 #ifdef TARGET_NR_mlockall
6506 case TARGET_NR_mlockall:
6507 ret = get_errno(mlockall(arg1));
6508 break;
6509 #endif
6510 #ifdef TARGET_NR_munlockall
6511 case TARGET_NR_munlockall:
6512 ret = get_errno(munlockall());
6513 break;
6514 #endif
6515 case TARGET_NR_truncate:
6516 if (!(p = lock_user_string(arg1)))
6517 goto efault;
6518 ret = get_errno(truncate(p, arg2));
6519 unlock_user(p, arg1, 0);
6520 break;
6521 case TARGET_NR_ftruncate:
6522 ret = get_errno(ftruncate(arg1, arg2));
6523 break;
6524 case TARGET_NR_fchmod:
6525 ret = get_errno(fchmod(arg1, arg2));
6526 break;
6527 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6528 case TARGET_NR_fchmodat:
6529 if (!(p = lock_user_string(arg2)))
6530 goto efault;
6531 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6532 unlock_user(p, arg2, 0);
6533 break;
6534 #endif
6535 case TARGET_NR_getpriority:
6536 /* Note that negative values are valid for getpriority, so we must
6537 differentiate based on errno settings. */
6538 errno = 0;
6539 ret = getpriority(arg1, arg2);
6540 if (ret == -1 && errno != 0) {
6541 ret = -host_to_target_errno(errno);
6542 break;
6543 }
6544 #ifdef TARGET_ALPHA
6545 /* Return value is the unbiased priority. Signal no error. */
6546 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6547 #else
6548 /* Return value is a biased priority to avoid negative numbers. */
6549 ret = 20 - ret;
6550 #endif
6551 break;
6552 case TARGET_NR_setpriority:
6553 ret = get_errno(setpriority(arg1, arg2, arg3));
6554 break;
6555 #ifdef TARGET_NR_profil
6556 case TARGET_NR_profil:
6557 goto unimplemented;
6558 #endif
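/* statfs and fstatfs share the host-to-target conversion that follows:
   fstatfs performs the host call and jumps back to the convert_statfs
   label to fill in the target structure (likewise for the 64-bit
   variants via convert_statfs64). */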
6559 case TARGET_NR_statfs:
6560 if (!(p = lock_user_string(arg1)))
6561 goto efault;
6562 ret = get_errno(statfs(path(p), &stfs));
6563 unlock_user(p, arg1, 0);
6564 convert_statfs:
6565 if (!is_error(ret)) {
6566 struct target_statfs *target_stfs;
6567
6568 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6569 goto efault;
6570 __put_user(stfs.f_type, &target_stfs->f_type);
6571 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6572 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6573 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6574 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6575 __put_user(stfs.f_files, &target_stfs->f_files);
6576 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6577 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6578 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6579 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6580 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6581 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6582 unlock_user_struct(target_stfs, arg2, 1);
6583 }
6584 break;
6585 case TARGET_NR_fstatfs:
6586 ret = get_errno(fstatfs(arg1, &stfs));
6587 goto convert_statfs;
6588 #ifdef TARGET_NR_statfs64
6589 case TARGET_NR_statfs64:
6590 if (!(p = lock_user_string(arg1)))
6591 goto efault;
6592 ret = get_errno(statfs(path(p), &stfs));
6593 unlock_user(p, arg1, 0);
6594 convert_statfs64:
6595 if (!is_error(ret)) {
6596 struct target_statfs64 *target_stfs;
6597
6598 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6599 goto efault;
6600 __put_user(stfs.f_type, &target_stfs->f_type);
6601 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6602 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6603 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6604 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6605 __put_user(stfs.f_files, &target_stfs->f_files);
6606 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6607 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6608 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6609 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6610 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6611 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6612 unlock_user_struct(target_stfs, arg3, 1);
6613 }
6614 break;
6615 case TARGET_NR_fstatfs64:
6616 ret = get_errno(fstatfs(arg1, &stfs));
6617 goto convert_statfs64;
6618 #endif
6619 #ifdef TARGET_NR_ioperm
6620 case TARGET_NR_ioperm:
6621 goto unimplemented;
6622 #endif
6623 #ifdef TARGET_NR_socketcall
6624 case TARGET_NR_socketcall:
6625 ret = do_socketcall(arg1, arg2);
6626 break;
6627 #endif
6628 #ifdef TARGET_NR_accept
6629 case TARGET_NR_accept:
6630 ret = do_accept(arg1, arg2, arg3);
6631 break;
6632 #endif
6633 #ifdef TARGET_NR_bind
6634 case TARGET_NR_bind:
6635 ret = do_bind(arg1, arg2, arg3);
6636 break;
6637 #endif
6638 #ifdef TARGET_NR_connect
6639 case TARGET_NR_connect:
6640 ret = do_connect(arg1, arg2, arg3);
6641 break;
6642 #endif
6643 #ifdef TARGET_NR_getpeername
6644 case TARGET_NR_getpeername:
6645 ret = do_getpeername(arg1, arg2, arg3);
6646 break;
6647 #endif
6648 #ifdef TARGET_NR_getsockname
6649 case TARGET_NR_getsockname:
6650 ret = do_getsockname(arg1, arg2, arg3);
6651 break;
6652 #endif
6653 #ifdef TARGET_NR_getsockopt
6654 case TARGET_NR_getsockopt:
6655 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6656 break;
6657 #endif
6658 #ifdef TARGET_NR_listen
6659 case TARGET_NR_listen:
6660 ret = get_errno(listen(arg1, arg2));
6661 break;
6662 #endif
6663 #ifdef TARGET_NR_recv
6664 case TARGET_NR_recv:
6665 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6666 break;
6667 #endif
6668 #ifdef TARGET_NR_recvfrom
6669 case TARGET_NR_recvfrom:
6670 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6671 break;
6672 #endif
6673 #ifdef TARGET_NR_recvmsg
6674 case TARGET_NR_recvmsg:
6675 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6676 break;
6677 #endif
6678 #ifdef TARGET_NR_send
6679 case TARGET_NR_send:
6680 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6681 break;
6682 #endif
6683 #ifdef TARGET_NR_sendmsg
6684 case TARGET_NR_sendmsg:
6685 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6686 break;
6687 #endif
6688 #ifdef TARGET_NR_sendto
6689 case TARGET_NR_sendto:
6690 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6691 break;
6692 #endif
6693 #ifdef TARGET_NR_shutdown
6694 case TARGET_NR_shutdown:
6695 ret = get_errno(shutdown(arg1, arg2));
6696 break;
6697 #endif
6698 #ifdef TARGET_NR_socket
6699 case TARGET_NR_socket:
6700 ret = do_socket(arg1, arg2, arg3);
6701 break;
6702 #endif
6703 #ifdef TARGET_NR_socketpair
6704 case TARGET_NR_socketpair:
6705 ret = do_socketpair(arg1, arg2, arg3, arg4);
6706 break;
6707 #endif
6708 #ifdef TARGET_NR_setsockopt
6709 case TARGET_NR_setsockopt:
6710 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6711 break;
6712 #endif
6713
6714 case TARGET_NR_syslog:
6715 if (!(p = lock_user_string(arg2)))
6716 goto efault;
6717 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6718 unlock_user(p, arg2, 0);
6719 break;
6720
6721 case TARGET_NR_setitimer:
6722 {
6723 struct itimerval value, ovalue, *pvalue;
6724
6725 if (arg2) {
6726 pvalue = &value;
6727 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6728 || copy_from_user_timeval(&pvalue->it_value,
6729 arg2 + sizeof(struct target_timeval)))
6730 goto efault;
6731 } else {
6732 pvalue = NULL;
6733 }
6734 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6735 if (!is_error(ret) && arg3) {
6736 if (copy_to_user_timeval(arg3,
6737 &ovalue.it_interval)
6738 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6739 &ovalue.it_value))
6740 goto efault;
6741 }
6742 }
6743 break;
6744 case TARGET_NR_getitimer:
6745 {
6746 struct itimerval value;
6747
6748 ret = get_errno(getitimer(arg1, &value));
6749 if (!is_error(ret) && arg2) {
6750 if (copy_to_user_timeval(arg2,
6751 &value.it_interval)
6752 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6753 &value.it_value))
6754 goto efault;
6755 }
6756 }
6757 break;
6758 case TARGET_NR_stat:
6759 if (!(p = lock_user_string(arg1)))
6760 goto efault;
6761 ret = get_errno(stat(path(p), &st));
6762 unlock_user(p, arg1, 0);
6763 goto do_stat;
6764 case TARGET_NR_lstat:
6765 if (!(p = lock_user_string(arg1)))
6766 goto efault;
6767 ret = get_errno(lstat(path(p), &st));
6768 unlock_user(p, arg1, 0);
6769 goto do_stat;
6770 case TARGET_NR_fstat:
6771 {
6772 ret = get_errno(fstat(arg1, &st));
6773 do_stat:
6774 if (!is_error(ret)) {
6775 struct target_stat *target_st;
6776
6777 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6778 goto efault;
6779 memset(target_st, 0, sizeof(*target_st));
6780 __put_user(st.st_dev, &target_st->st_dev);
6781 __put_user(st.st_ino, &target_st->st_ino);
6782 __put_user(st.st_mode, &target_st->st_mode);
6783 __put_user(st.st_uid, &target_st->st_uid);
6784 __put_user(st.st_gid, &target_st->st_gid);
6785 __put_user(st.st_nlink, &target_st->st_nlink);
6786 __put_user(st.st_rdev, &target_st->st_rdev);
6787 __put_user(st.st_size, &target_st->st_size);
6788 __put_user(st.st_blksize, &target_st->st_blksize);
6789 __put_user(st.st_blocks, &target_st->st_blocks);
6790 __put_user(st.st_atime, &target_st->target_st_atime);
6791 __put_user(st.st_mtime, &target_st->target_st_mtime);
6792 __put_user(st.st_ctime, &target_st->target_st_ctime);
6793 unlock_user_struct(target_st, arg2, 1);
6794 }
6795 }
6796 break;
6797 #ifdef TARGET_NR_olduname
6798 case TARGET_NR_olduname:
6799 goto unimplemented;
6800 #endif
6801 #ifdef TARGET_NR_iopl
6802 case TARGET_NR_iopl:
6803 goto unimplemented;
6804 #endif
6805 case TARGET_NR_vhangup:
6806 ret = get_errno(vhangup());
6807 break;
6808 #ifdef TARGET_NR_idle
6809 case TARGET_NR_idle:
6810 goto unimplemented;
6811 #endif
6812 #ifdef TARGET_NR_syscall
6813 case TARGET_NR_syscall:
6814 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6815 arg6, arg7, arg8, 0);
6816 break;
6817 #endif
6818 case TARGET_NR_wait4:
6819 {
6820 int status;
6821 abi_long status_ptr = arg2;
6822 struct rusage rusage, *rusage_ptr;
6823 abi_ulong target_rusage = arg4;
6824 if (target_rusage)
6825 rusage_ptr = &rusage;
6826 else
6827 rusage_ptr = NULL;
6828 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6829 if (!is_error(ret)) {
6830 if (status_ptr && ret) {
6831 status = host_to_target_waitstatus(status);
6832 if (put_user_s32(status, status_ptr))
6833 goto efault;
6834 }
6835 if (target_rusage)
6836 host_to_target_rusage(target_rusage, &rusage);
6837 }
6838 }
6839 break;
6840 #ifdef TARGET_NR_swapoff
6841 case TARGET_NR_swapoff:
6842 if (!(p = lock_user_string(arg1)))
6843 goto efault;
6844 ret = get_errno(swapoff(p));
6845 unlock_user(p, arg1, 0);
6846 break;
6847 #endif
6848 case TARGET_NR_sysinfo:
6849 {
6850 struct target_sysinfo *target_value;
6851 struct sysinfo value;
6852 ret = get_errno(sysinfo(&value));
6853 if (!is_error(ret) && arg1)
6854 {
6855 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6856 goto efault;
6857 __put_user(value.uptime, &target_value->uptime);
6858 __put_user(value.loads[0], &target_value->loads[0]);
6859 __put_user(value.loads[1], &target_value->loads[1]);
6860 __put_user(value.loads[2], &target_value->loads[2]);
6861 __put_user(value.totalram, &target_value->totalram);
6862 __put_user(value.freeram, &target_value->freeram);
6863 __put_user(value.sharedram, &target_value->sharedram);
6864 __put_user(value.bufferram, &target_value->bufferram);
6865 __put_user(value.totalswap, &target_value->totalswap);
6866 __put_user(value.freeswap, &target_value->freeswap);
6867 __put_user(value.procs, &target_value->procs);
6868 __put_user(value.totalhigh, &target_value->totalhigh);
6869 __put_user(value.freehigh, &target_value->freehigh);
6870 __put_user(value.mem_unit, &target_value->mem_unit);
6871 unlock_user_struct(target_value, arg1, 1);
6872 }
6873 }
6874 break;
6875 #ifdef TARGET_NR_ipc
6876 case TARGET_NR_ipc:
6877 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6878 break;
6879 #endif
6880 #ifdef TARGET_NR_semget
6881 case TARGET_NR_semget:
6882 ret = get_errno(semget(arg1, arg2, arg3));
6883 break;
6884 #endif
6885 #ifdef TARGET_NR_semop
6886 case TARGET_NR_semop:
6887 ret = get_errno(do_semop(arg1, arg2, arg3));
6888 break;
6889 #endif
6890 #ifdef TARGET_NR_semctl
6891 case TARGET_NR_semctl:
6892 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6893 break;
6894 #endif
6895 #ifdef TARGET_NR_msgctl
6896 case TARGET_NR_msgctl:
6897 ret = do_msgctl(arg1, arg2, arg3);
6898 break;
6899 #endif
6900 #ifdef TARGET_NR_msgget
6901 case TARGET_NR_msgget:
6902 ret = get_errno(msgget(arg1, arg2));
6903 break;
6904 #endif
6905 #ifdef TARGET_NR_msgrcv
6906 case TARGET_NR_msgrcv:
6907 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6908 break;
6909 #endif
6910 #ifdef TARGET_NR_msgsnd
6911 case TARGET_NR_msgsnd:
6912 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6913 break;
6914 #endif
6915 #ifdef TARGET_NR_shmget
6916 case TARGET_NR_shmget:
6917 ret = get_errno(shmget(arg1, arg2, arg3));
6918 break;
6919 #endif
6920 #ifdef TARGET_NR_shmctl
6921 case TARGET_NR_shmctl:
6922 ret = do_shmctl(arg1, arg2, arg3);
6923 break;
6924 #endif
6925 #ifdef TARGET_NR_shmat
6926 case TARGET_NR_shmat:
6927 ret = do_shmat(arg1, arg2, arg3);
6928 break;
6929 #endif
6930 #ifdef TARGET_NR_shmdt
6931 case TARGET_NR_shmdt:
6932 ret = do_shmdt(arg1);
6933 break;
6934 #endif
6935 case TARGET_NR_fsync:
6936 ret = get_errno(fsync(arg1));
6937 break;
6938 case TARGET_NR_clone:
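/* The order in which clone's stack, tid-pointer and TLS arguments
   arrive differs between the targets handled here, so the register
   arguments are reshuffled per target before calling do_fork. */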
6939 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6940 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6941 #elif defined(TARGET_CRIS)
6942 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6943 #elif defined(TARGET_MICROBLAZE)
6944 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6945 #elif defined(TARGET_S390X)
6946 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6947 #else
6948 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6949 #endif
6950 break;
6951 #ifdef __NR_exit_group
6952 /* new thread calls */
6953 case TARGET_NR_exit_group:
6954 #ifdef TARGET_GPROF
6955 _mcleanup();
6956 #endif
6957 gdb_exit(cpu_env, arg1);
6958 ret = get_errno(exit_group(arg1));
6959 break;
6960 #endif
6961 case TARGET_NR_setdomainname:
6962 if (!(p = lock_user_string(arg1)))
6963 goto efault;
6964 ret = get_errno(setdomainname(p, arg2));
6965 unlock_user(p, arg1, 0);
6966 break;
6967 case TARGET_NR_uname:
6968 /* no need to transcode because we use the linux syscall */
6969 {
6970 struct new_utsname * buf;
6971
6972 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6973 goto efault;
6974 ret = get_errno(sys_uname(buf));
6975 if (!is_error(ret)) {
6976 /* Overwrite the native machine name with whatever is being
6977 emulated. */
6978 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6979 /* Allow the user to override the reported release. */
6980 if (qemu_uname_release && *qemu_uname_release)
6981 strcpy (buf->release, qemu_uname_release);
6982 }
6983 unlock_user_struct(buf, arg1, 1);
6984 }
6985 break;
6986 #ifdef TARGET_I386
6987 case TARGET_NR_modify_ldt:
6988 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6989 break;
6990 #if !defined(TARGET_X86_64)
6991 case TARGET_NR_vm86old:
6992 goto unimplemented;
6993 case TARGET_NR_vm86:
6994 ret = do_vm86(cpu_env, arg1, arg2);
6995 break;
6996 #endif
6997 #endif
6998 case TARGET_NR_adjtimex:
6999 goto unimplemented;
7000 #ifdef TARGET_NR_create_module
7001 case TARGET_NR_create_module:
7002 #endif
7003 case TARGET_NR_init_module:
7004 case TARGET_NR_delete_module:
7005 #ifdef TARGET_NR_get_kernel_syms
7006 case TARGET_NR_get_kernel_syms:
7007 #endif
7008 goto unimplemented;
7009 case TARGET_NR_quotactl:
7010 goto unimplemented;
7011 case TARGET_NR_getpgid:
7012 ret = get_errno(getpgid(arg1));
7013 break;
7014 case TARGET_NR_fchdir:
7015 ret = get_errno(fchdir(arg1));
7016 break;
7017 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7018 case TARGET_NR_bdflush:
7019 goto unimplemented;
7020 #endif
7021 #ifdef TARGET_NR_sysfs
7022 case TARGET_NR_sysfs:
7023 goto unimplemented;
7024 #endif
7025 case TARGET_NR_personality:
7026 ret = get_errno(personality(arg1));
7027 break;
7028 #ifdef TARGET_NR_afs_syscall
7029 case TARGET_NR_afs_syscall:
7030 goto unimplemented;
7031 #endif
7032 #ifdef TARGET_NR__llseek /* Not on alpha */
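/* When the host lacks __NR_llseek the two 32-bit offset halves are
   combined and passed to plain lseek(); in either case the resulting
   64-bit offset is written back to the guest through arg4. */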
7033 case TARGET_NR__llseek:
7034 {
7035 int64_t res;
7036 #if !defined(__NR_llseek)
7037 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7038 if (res == -1) {
7039 ret = get_errno(res);
7040 } else {
7041 ret = 0;
7042 }
7043 #else
7044 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7045 #endif
7046 if ((ret == 0) && put_user_s64(res, arg4)) {
7047 goto efault;
7048 }
7049 }
7050 break;
7051 #endif
7052 case TARGET_NR_getdents:
7053 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
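/* With a 32-bit target on a 64-bit host the dirent layouts differ, so
   each record returned by the host is repacked into the target_dirent
   format with an adjusted d_reclen; otherwise the buffer is simply
   byte-swapped in place. */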
7054 {
7055 struct target_dirent *target_dirp;
7056 struct linux_dirent *dirp;
7057 abi_long count = arg3;
7058
7059 dirp = malloc(count);
7060 if (!dirp) {
7061 ret = -TARGET_ENOMEM;
7062 goto fail;
7063 }
7064
7065 ret = get_errno(sys_getdents(arg1, dirp, count));
7066 if (!is_error(ret)) {
7067 struct linux_dirent *de;
7068 struct target_dirent *tde;
7069 int len = ret;
7070 int reclen, treclen;
7071 int count1, tnamelen;
7072
7073 count1 = 0;
7074 de = dirp;
7075 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7076 goto efault;
7077 tde = target_dirp;
7078 while (len > 0) {
7079 reclen = de->d_reclen;
7080 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7081 assert(tnamelen >= 0);
7082 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7083 assert(count1 + treclen <= count);
7084 tde->d_reclen = tswap16(treclen);
7085 tde->d_ino = tswapal(de->d_ino);
7086 tde->d_off = tswapal(de->d_off);
7087 memcpy(tde->d_name, de->d_name, tnamelen);
7088 de = (struct linux_dirent *)((char *)de + reclen);
7089 len -= reclen;
7090 tde = (struct target_dirent *)((char *)tde + treclen);
7091 count1 += treclen;
7092 }
7093 ret = count1;
7094 unlock_user(target_dirp, arg2, ret);
7095 }
7096 free(dirp);
7097 }
7098 #else
7099 {
7100 struct linux_dirent *dirp;
7101 abi_long count = arg3;
7102
7103 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7104 goto efault;
7105 ret = get_errno(sys_getdents(arg1, dirp, count));
7106 if (!is_error(ret)) {
7107 struct linux_dirent *de;
7108 int len = ret;
7109 int reclen;
7110 de = dirp;
7111 while (len > 0) {
7112 reclen = de->d_reclen;
7113 if (reclen > len)
7114 break;
7115 de->d_reclen = tswap16(reclen);
7116 tswapls(&de->d_ino);
7117 tswapls(&de->d_off);
7118 de = (struct linux_dirent *)((char *)de + reclen);
7119 len -= reclen;
7120 }
7121 }
7122 unlock_user(dirp, arg2, ret);
7123 }
7124 #endif
7125 break;
7126 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7127 case TARGET_NR_getdents64:
7128 {
7129 struct linux_dirent64 *dirp;
7130 abi_long count = arg3;
7131 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7132 goto efault;
7133 ret = get_errno(sys_getdents64(arg1, dirp, count));
7134 if (!is_error(ret)) {
7135 struct linux_dirent64 *de;
7136 int len = ret;
7137 int reclen;
7138 de = dirp;
7139 while (len > 0) {
7140 reclen = de->d_reclen;
7141 if (reclen > len)
7142 break;
7143 de->d_reclen = tswap16(reclen);
7144 tswap64s((uint64_t *)&de->d_ino);
7145 tswap64s((uint64_t *)&de->d_off);
7146 de = (struct linux_dirent64 *)((char *)de + reclen);
7147 len -= reclen;
7148 }
7149 }
7150 unlock_user(dirp, arg2, ret);
7151 }
7152 break;
7153 #endif /* TARGET_NR_getdents64 */
7154 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
7155 #ifdef TARGET_S390X
7156 case TARGET_NR_select:
7157 #else
7158 case TARGET_NR__newselect:
7159 #endif
7160 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7161 break;
7162 #endif
7163 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7164 # ifdef TARGET_NR_poll
7165 case TARGET_NR_poll:
7166 # endif
7167 # ifdef TARGET_NR_ppoll
7168 case TARGET_NR_ppoll:
7169 # endif
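/* poll and ppoll share this implementation: the target pollfd array is
   converted to host format, and for ppoll the optional timespec and
   sigset arguments are converted as well, with the (possibly updated)
   timespec written back to the guest after the call. */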
7170 {
7171 struct target_pollfd *target_pfd;
7172 unsigned int nfds = arg2;
7173 int timeout = arg3;
7174 struct pollfd *pfd;
7175 unsigned int i;
7176
7177 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7178 if (!target_pfd)
7179 goto efault;
7180
7181 pfd = alloca(sizeof(struct pollfd) * nfds);
7182 for(i = 0; i < nfds; i++) {
7183 pfd[i].fd = tswap32(target_pfd[i].fd);
7184 pfd[i].events = tswap16(target_pfd[i].events);
7185 }
7186
7187 # ifdef TARGET_NR_ppoll
7188 if (num == TARGET_NR_ppoll) {
7189 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7190 target_sigset_t *target_set;
7191 sigset_t _set, *set = &_set;
7192
7193 if (arg3) {
7194 if (target_to_host_timespec(timeout_ts, arg3)) {
7195 unlock_user(target_pfd, arg1, 0);
7196 goto efault;
7197 }
7198 } else {
7199 timeout_ts = NULL;
7200 }
7201
7202 if (arg4) {
7203 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7204 if (!target_set) {
7205 unlock_user(target_pfd, arg1, 0);
7206 goto efault;
7207 }
7208 target_to_host_sigset(set, target_set);
7209 } else {
7210 set = NULL;
7211 }
7212
7213 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7214
7215 if (!is_error(ret) && arg3) {
7216 host_to_target_timespec(arg3, timeout_ts);
7217 }
7218 if (arg4) {
7219 unlock_user(target_set, arg4, 0);
7220 }
7221 } else
7222 # endif
7223 ret = get_errno(poll(pfd, nfds, timeout));
7224
7225 if (!is_error(ret)) {
7226 for(i = 0; i < nfds; i++) {
7227 target_pfd[i].revents = tswap16(pfd[i].revents);
7228 }
7229 }
7230 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7231 }
7232 break;
7233 #endif
7234 case TARGET_NR_flock:
7235 /* NOTE: the flock constant seems to be the same for every
7236 Linux platform */
7237 ret = get_errno(flock(arg1, arg2));
7238 break;
7239 case TARGET_NR_readv:
7240 {
7241 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7242 if (vec != NULL) {
7243 ret = get_errno(readv(arg1, vec, arg3));
7244 unlock_iovec(vec, arg2, arg3, 1);
7245 } else {
7246 ret = -host_to_target_errno(errno);
7247 }
7248 }
7249 break;
7250 case TARGET_NR_writev:
7251 {
7252 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7253 if (vec != NULL) {
7254 ret = get_errno(writev(arg1, vec, arg3));
7255 unlock_iovec(vec, arg2, arg3, 0);
7256 } else {
7257 ret = -host_to_target_errno(errno);
7258 }
7259 }
7260 break;
7261 case TARGET_NR_getsid:
7262 ret = get_errno(getsid(arg1));
7263 break;
7264 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7265 case TARGET_NR_fdatasync:
7266 ret = get_errno(fdatasync(arg1));
7267 break;
7268 #endif
7269 case TARGET_NR__sysctl:
7270 /* We don't implement this, but ENOTDIR is always a safe
7271 return value. */
7272 ret = -TARGET_ENOTDIR;
7273 break;
7274 case TARGET_NR_sched_getaffinity:
7275 {
7276 unsigned int mask_size;
7277 unsigned long *mask;
7278
7279 /*
7280 * sched_getaffinity needs multiples of ulong, so we need to take
7281 * care of mismatches between target ulong and host ulong sizes.
7282 */
7283 if (arg2 & (sizeof(abi_ulong) - 1)) {
7284 ret = -TARGET_EINVAL;
7285 break;
7286 }
7287 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7288
7289 mask = alloca(mask_size);
7290 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7291
7292 if (!is_error(ret)) {
7293 if (copy_to_user(arg3, mask, ret)) {
7294 goto efault;
7295 }
7296 }
7297 }
7298 break;
7299 case TARGET_NR_sched_setaffinity:
7300 {
7301 unsigned int mask_size;
7302 unsigned long *mask;
7303
7304 /*
7305 * sched_setaffinity needs multiples of ulong, so we need to take
7306 * care of mismatches between target ulong and host ulong sizes.
7307 */
7308 if (arg2 & (sizeof(abi_ulong) - 1)) {
7309 ret = -TARGET_EINVAL;
7310 break;
7311 }
7312 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7313
7314 mask = alloca(mask_size);
7315 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7316 goto efault;
7317 }
7318 memcpy(mask, p, arg2);
7319 unlock_user_struct(p, arg3, 0);
7320
7321 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7322 }
7323 break;
7324 case TARGET_NR_sched_setparam:
7325 {
7326 struct sched_param *target_schp;
7327 struct sched_param schp;
7328
7329 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7330 goto efault;
7331 schp.sched_priority = tswap32(target_schp->sched_priority);
7332 unlock_user_struct(target_schp, arg2, 0);
7333 ret = get_errno(sched_setparam(arg1, &schp));
7334 }
7335 break;
7336 case TARGET_NR_sched_getparam:
7337 {
7338 struct sched_param *target_schp;
7339 struct sched_param schp;
7340 ret = get_errno(sched_getparam(arg1, &schp));
7341 if (!is_error(ret)) {
7342 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7343 goto efault;
7344 target_schp->sched_priority = tswap32(schp.sched_priority);
7345 unlock_user_struct(target_schp, arg2, 1);
7346 }
7347 }
7348 break;
7349 case TARGET_NR_sched_setscheduler:
7350 {
7351 struct sched_param *target_schp;
7352 struct sched_param schp;
7353 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7354 goto efault;
7355 schp.sched_priority = tswap32(target_schp->sched_priority);
7356 unlock_user_struct(target_schp, arg3, 0);
7357 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7358 }
7359 break;
7360 case TARGET_NR_sched_getscheduler:
7361 ret = get_errno(sched_getscheduler(arg1));
7362 break;
7363 case TARGET_NR_sched_yield:
7364 ret = get_errno(sched_yield());
7365 break;
7366 case TARGET_NR_sched_get_priority_max:
7367 ret = get_errno(sched_get_priority_max(arg1));
7368 break;
7369 case TARGET_NR_sched_get_priority_min:
7370 ret = get_errno(sched_get_priority_min(arg1));
7371 break;
7372 case TARGET_NR_sched_rr_get_interval:
7373 {
7374 struct timespec ts;
7375 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7376 if (!is_error(ret)) {
7377 host_to_target_timespec(arg2, &ts);
7378 }
7379 }
7380 break;
7381 case TARGET_NR_nanosleep:
7382 {
7383 struct timespec req, rem;
7384 target_to_host_timespec(&req, arg1);
7385 ret = get_errno(nanosleep(&req, &rem));
7386 if (is_error(ret) && arg2) {
7387 host_to_target_timespec(arg2, &rem);
7388 }
7389 }
7390 break;
7391 #ifdef TARGET_NR_query_module
7392 case TARGET_NR_query_module:
7393 goto unimplemented;
7394 #endif
7395 #ifdef TARGET_NR_nfsservctl
7396 case TARGET_NR_nfsservctl:
7397 goto unimplemented;
7398 #endif
7399 case TARGET_NR_prctl:
7400 switch (arg1) {
7401 case PR_GET_PDEATHSIG:
7402 {
7403 int deathsig;
7404 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7405 if (!is_error(ret) && arg2
7406 && put_user_ual(deathsig, arg2)) {
7407 goto efault;
7408 }
7409 break;
7410 }
7411 #ifdef PR_GET_NAME
7412 case PR_GET_NAME:
7413 {
7414 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7415 if (!name) {
7416 goto efault;
7417 }
7418 ret = get_errno(prctl(arg1, (unsigned long)name,
7419 arg3, arg4, arg5));
7420 unlock_user(name, arg2, 16);
7421 break;
7422 }
7423 case PR_SET_NAME:
7424 {
7425 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7426 if (!name) {
7427 goto efault;
7428 }
7429 ret = get_errno(prctl(arg1, (unsigned long)name,
7430 arg3, arg4, arg5));
7431 unlock_user(name, arg2, 0);
7432 break;
7433 }
7434 #endif
7435 default:
7436 /* Most prctl options have no pointer arguments */
7437 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7438 break;
7439 }
7440 break;
7441 #ifdef TARGET_NR_arch_prctl
7442 case TARGET_NR_arch_prctl:
7443 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7444 ret = do_arch_prctl(cpu_env, arg1, arg2);
7445 break;
7446 #else
7447 goto unimplemented;
7448 #endif
7449 #endif
7450 #ifdef TARGET_NR_pread64
7451 case TARGET_NR_pread64:
7452 if (regpairs_aligned(cpu_env)) {
7453 arg4 = arg5;
7454 arg5 = arg6;
7455 }
7456 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7457 goto efault;
7458 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7459 unlock_user(p, arg2, ret);
7460 break;
7461 case TARGET_NR_pwrite64:
7462 if (regpairs_aligned(cpu_env)) {
7463 arg4 = arg5;
7464 arg5 = arg6;
7465 }
7466 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7467 goto efault;
7468 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7469 unlock_user(p, arg2, 0);
7470 break;
7471 #endif
7472 case TARGET_NR_getcwd:
7473 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7474 goto efault;
7475 ret = get_errno(sys_getcwd1(p, arg2));
7476 unlock_user(p, arg1, ret);
7477 break;
7478 case TARGET_NR_capget:
7479 goto unimplemented;
7480 case TARGET_NR_capset:
7481 goto unimplemented;
7482 case TARGET_NR_sigaltstack:
7483 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7484 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7485 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7486 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7487 break;
7488 #else
7489 goto unimplemented;
7490 #endif
7491 case TARGET_NR_sendfile:
7492 goto unimplemented;
7493 #ifdef TARGET_NR_getpmsg
7494 case TARGET_NR_getpmsg:
7495 goto unimplemented;
7496 #endif
7497 #ifdef TARGET_NR_putpmsg
7498 case TARGET_NR_putpmsg:
7499 goto unimplemented;
7500 #endif
7501 #ifdef TARGET_NR_vfork
7502 case TARGET_NR_vfork:
7503 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7504 0, 0, 0, 0));
7505 break;
7506 #endif
7507 #ifdef TARGET_NR_ugetrlimit
7508 case TARGET_NR_ugetrlimit:
7509 {
7510 struct rlimit rlim;
7511 int resource = target_to_host_resource(arg1);
7512 ret = get_errno(getrlimit(resource, &rlim));
7513 if (!is_error(ret)) {
7514 struct target_rlimit *target_rlim;
7515 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7516 goto efault;
7517 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7518 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7519 unlock_user_struct(target_rlim, arg2, 1);
7520 }
7521 break;
7522 }
7523 #endif
7524 #ifdef TARGET_NR_truncate64
7525 case TARGET_NR_truncate64:
7526 if (!(p = lock_user_string(arg1)))
7527 goto efault;
7528 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7529 unlock_user(p, arg1, 0);
7530 break;
7531 #endif
7532 #ifdef TARGET_NR_ftruncate64
7533 case TARGET_NR_ftruncate64:
7534 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7535 break;
7536 #endif
7537 #ifdef TARGET_NR_stat64
7538 case TARGET_NR_stat64:
7539 if (!(p = lock_user_string(arg1)))
7540 goto efault;
7541 ret = get_errno(stat(path(p), &st));
7542 unlock_user(p, arg1, 0);
7543 if (!is_error(ret))
7544 ret = host_to_target_stat64(cpu_env, arg2, &st);
7545 break;
7546 #endif
7547 #ifdef TARGET_NR_lstat64
7548 case TARGET_NR_lstat64:
7549 if (!(p = lock_user_string(arg1)))
7550 goto efault;
7551 ret = get_errno(lstat(path(p), &st));
7552 unlock_user(p, arg1, 0);
7553 if (!is_error(ret))
7554 ret = host_to_target_stat64(cpu_env, arg2, &st);
7555 break;
7556 #endif
7557 #ifdef TARGET_NR_fstat64
7558 case TARGET_NR_fstat64:
7559 ret = get_errno(fstat(arg1, &st));
7560 if (!is_error(ret))
7561 ret = host_to_target_stat64(cpu_env, arg2, &st);
7562 break;
7563 #endif
7564 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7565 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7566 #ifdef TARGET_NR_fstatat64
7567 case TARGET_NR_fstatat64:
7568 #endif
7569 #ifdef TARGET_NR_newfstatat
7570 case TARGET_NR_newfstatat:
7571 #endif
7572 if (!(p = lock_user_string(arg2)))
7573 goto efault;
7574 #ifdef __NR_fstatat64
7575 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7576 #else
7577 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7578 #endif
7579 if (!is_error(ret))
7580 ret = host_to_target_stat64(cpu_env, arg3, &st);
7581 break;
7582 #endif
7583 case TARGET_NR_lchown:
7584 if (!(p = lock_user_string(arg1)))
7585 goto efault;
7586 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7587 unlock_user(p, arg1, 0);
7588 break;
7589 #ifdef TARGET_NR_getuid
7590 case TARGET_NR_getuid:
7591 ret = get_errno(high2lowuid(getuid()));
7592 break;
7593 #endif
7594 #ifdef TARGET_NR_getgid
7595 case TARGET_NR_getgid:
7596 ret = get_errno(high2lowgid(getgid()));
7597 break;
7598 #endif
7599 #ifdef TARGET_NR_geteuid
7600 case TARGET_NR_geteuid:
7601 ret = get_errno(high2lowuid(geteuid()));
7602 break;
7603 #endif
7604 #ifdef TARGET_NR_getegid
7605 case TARGET_NR_getegid:
7606 ret = get_errno(high2lowgid(getegid()));
7607 break;
7608 #endif
7609 case TARGET_NR_setreuid:
7610 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7611 break;
7612 case TARGET_NR_setregid:
7613 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7614 break;
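/* These are the legacy narrow-id group list calls: each entry is
   byte-swapped and converted with high2lowgid/low2highgid, and the
   guest buffer is addressed assuming two bytes per target_id entry. */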
7615 case TARGET_NR_getgroups:
7616 {
7617 int gidsetsize = arg1;
7618 target_id *target_grouplist;
7619 gid_t *grouplist;
7620 int i;
7621
7622 grouplist = alloca(gidsetsize * sizeof(gid_t));
7623 ret = get_errno(getgroups(gidsetsize, grouplist));
7624 if (gidsetsize == 0)
7625 break;
7626 if (!is_error(ret)) {
7627 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7628 if (!target_grouplist)
7629 goto efault;
7630 for(i = 0;i < ret; i++)
7631 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7632 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7633 }
7634 }
7635 break;
7636 case TARGET_NR_setgroups:
7637 {
7638 int gidsetsize = arg1;
7639 target_id *target_grouplist;
7640 gid_t *grouplist;
7641 int i;
7642
7643 grouplist = alloca(gidsetsize * sizeof(gid_t));
7644 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7645 if (!target_grouplist) {
7646 ret = -TARGET_EFAULT;
7647 goto fail;
7648 }
7649 for(i = 0;i < gidsetsize; i++)
7650 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7651 unlock_user(target_grouplist, arg2, 0);
7652 ret = get_errno(setgroups(gidsetsize, grouplist));
7653 }
7654 break;
7655 case TARGET_NR_fchown:
7656 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7657 break;
7658 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7659 case TARGET_NR_fchownat:
7660 if (!(p = lock_user_string(arg2)))
7661 goto efault;
7662 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7663 unlock_user(p, arg2, 0);
7664 break;
7665 #endif
7666 #ifdef TARGET_NR_setresuid
7667 case TARGET_NR_setresuid:
7668 ret = get_errno(setresuid(low2highuid(arg1),
7669 low2highuid(arg2),
7670 low2highuid(arg3)));
7671 break;
7672 #endif
7673 #ifdef TARGET_NR_getresuid
7674 case TARGET_NR_getresuid:
7675 {
7676 uid_t ruid, euid, suid;
7677 ret = get_errno(getresuid(&ruid, &euid, &suid));
7678 if (!is_error(ret)) {
7679 if (put_user_u16(high2lowuid(ruid), arg1)
7680 || put_user_u16(high2lowuid(euid), arg2)
7681 || put_user_u16(high2lowuid(suid), arg3))
7682 goto efault;
7683 }
7684 }
7685 break;
7686 #endif
7687 #ifdef TARGET_NR_getresgid
7688 case TARGET_NR_setresgid:
7689 ret = get_errno(setresgid(low2highgid(arg1),
7690 low2highgid(arg2),
7691 low2highgid(arg3)));
7692 break;
7693 #endif
7694 #ifdef TARGET_NR_getresgid
7695 case TARGET_NR_getresgid:
7696 {
7697 gid_t rgid, egid, sgid;
7698 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7699 if (!is_error(ret)) {
7700 if (put_user_u16(high2lowgid(rgid), arg1)
7701 || put_user_u16(high2lowgid(egid), arg2)
7702 || put_user_u16(high2lowgid(sgid), arg3))
7703 goto efault;
7704 }
7705 }
7706 break;
7707 #endif
7708 case TARGET_NR_chown:
7709 if (!(p = lock_user_string(arg1)))
7710 goto efault;
7711 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7712 unlock_user(p, arg1, 0);
7713 break;
7714 case TARGET_NR_setuid:
7715 ret = get_errno(setuid(low2highuid(arg1)));
7716 break;
7717 case TARGET_NR_setgid:
7718 ret = get_errno(setgid(low2highgid(arg1)));
7719 break;
7720 case TARGET_NR_setfsuid:
7721 ret = get_errno(setfsuid(arg1));
7722 break;
7723 case TARGET_NR_setfsgid:
7724 ret = get_errno(setfsgid(arg1));
7725 break;
7726
7727 #ifdef TARGET_NR_lchown32
7728 case TARGET_NR_lchown32:
7729 if (!(p = lock_user_string(arg1)))
7730 goto efault;
7731 ret = get_errno(lchown(p, arg2, arg3));
7732 unlock_user(p, arg1, 0);
7733 break;
7734 #endif
7735 #ifdef TARGET_NR_getuid32
7736 case TARGET_NR_getuid32:
7737 ret = get_errno(getuid());
7738 break;
7739 #endif
7740
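/* The Alpha getxuid/getxgid syscalls return a pair of values: the real ID as
   the normal syscall result and the effective ID in register a4. */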
7741 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7742 /* Alpha specific */
7743 case TARGET_NR_getxuid:
7744 {
7745 uid_t euid;
7746            euid = geteuid();
7747            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
7748 }
7749 ret = get_errno(getuid());
7750 break;
7751 #endif
7752 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7753 /* Alpha specific */
7754 case TARGET_NR_getxgid:
7755 {
7756            gid_t egid;
7757            egid = getegid();
7758            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
7759 }
7760 ret = get_errno(getgid());
7761 break;
7762 #endif
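/* osf_getsysinfo/osf_setsysinfo emulate the Alpha IEEE FP control interface
   by translating between the kernel's software completion (SWCR) bit layout
   and the hardware FPCR. */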
7763 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7764 /* Alpha specific */
7765 case TARGET_NR_osf_getsysinfo:
7766 ret = -TARGET_EOPNOTSUPP;
7767 switch (arg1) {
7768 case TARGET_GSI_IEEE_FP_CONTROL:
7769 {
7770 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7771
7772 /* Copied from linux ieee_fpcr_to_swcr. */
7773 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7774 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7775 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7776 | SWCR_TRAP_ENABLE_DZE
7777 | SWCR_TRAP_ENABLE_OVF);
7778 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7779 | SWCR_TRAP_ENABLE_INE);
7780 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7781 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7782
7783 if (put_user_u64 (swcr, arg2))
7784 goto efault;
7785 ret = 0;
7786 }
7787 break;
7788
7789 /* case GSI_IEEE_STATE_AT_SIGNAL:
7790 -- Not implemented in linux kernel.
7791 case GSI_UACPROC:
7792 -- Retrieves current unaligned access state; not much used.
7793 case GSI_PROC_TYPE:
7794 -- Retrieves implver information; surely not used.
7795 case GSI_GET_HWRPB:
7796 -- Grabs a copy of the HWRPB; surely not used.
7797 */
7798 }
7799 break;
7800 #endif
7801 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7802 /* Alpha specific */
7803 case TARGET_NR_osf_setsysinfo:
7804 ret = -TARGET_EOPNOTSUPP;
7805 switch (arg1) {
7806 case TARGET_SSI_IEEE_FP_CONTROL:
7807 {
7808 uint64_t swcr, fpcr, orig_fpcr;
7809
7810 if (get_user_u64 (swcr, arg2)) {
7811 goto efault;
7812 }
7813 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7814 fpcr = orig_fpcr & FPCR_DYN_MASK;
7815
7816 /* Copied from linux ieee_swcr_to_fpcr. */
7817 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7818 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7819 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7820 | SWCR_TRAP_ENABLE_DZE
7821 | SWCR_TRAP_ENABLE_OVF)) << 48;
7822 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7823 | SWCR_TRAP_ENABLE_INE)) << 57;
7824 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7825 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7826
7827 cpu_alpha_store_fpcr(cpu_env, fpcr);
7828 ret = 0;
7829 }
7830 break;
7831
7832 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7833 {
7834 uint64_t exc, fpcr, orig_fpcr;
7835 int si_code;
7836
7837 if (get_user_u64(exc, arg2)) {
7838 goto efault;
7839 }
7840
7841 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7842
7843 /* We only add to the exception status here. */
7844 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7845
7846 cpu_alpha_store_fpcr(cpu_env, fpcr);
7847 ret = 0;
7848
7849 /* Old exceptions are not signaled. */
7850 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7851
7852                /* If any exceptions were set by this call and are
7853                   unmasked, send a signal. */
7854 si_code = 0;
7855 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7856 si_code = TARGET_FPE_FLTRES;
7857 }
7858 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7859 si_code = TARGET_FPE_FLTUND;
7860 }
7861 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7862 si_code = TARGET_FPE_FLTOVF;
7863 }
7864 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7865 si_code = TARGET_FPE_FLTDIV;
7866 }
7867 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7868 si_code = TARGET_FPE_FLTINV;
7869 }
7870 if (si_code != 0) {
7871 target_siginfo_t info;
7872 info.si_signo = SIGFPE;
7873 info.si_errno = 0;
7874 info.si_code = si_code;
7875 info._sifields._sigfault._addr
7876 = ((CPUArchState *)cpu_env)->pc;
7877 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7878 }
7879 }
7880 break;
7881
7882 /* case SSI_NVPAIRS:
7883 -- Used with SSIN_UACPROC to enable unaligned accesses.
7884 case SSI_IEEE_STATE_AT_SIGNAL:
7885 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7886 -- Not implemented in linux kernel
7887 */
7888 }
7889 break;
7890 #endif
7891 #ifdef TARGET_NR_osf_sigprocmask
7892 /* Alpha specific. */
7893 case TARGET_NR_osf_sigprocmask:
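        /* Unlike POSIX sigprocmask(), this variant takes the new mask by
           value in arg2 and returns the previous mask as the syscall result. */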
7894 {
7895 abi_ulong mask;
7896 int how;
7897 sigset_t set, oldset;
7898
7899 switch(arg1) {
7900 case TARGET_SIG_BLOCK:
7901 how = SIG_BLOCK;
7902 break;
7903 case TARGET_SIG_UNBLOCK:
7904 how = SIG_UNBLOCK;
7905 break;
7906 case TARGET_SIG_SETMASK:
7907 how = SIG_SETMASK;
7908 break;
7909 default:
7910 ret = -TARGET_EINVAL;
7911 goto fail;
7912 }
7913 mask = arg2;
7914 target_to_host_old_sigset(&set, &mask);
7915 sigprocmask(how, &set, &oldset);
7916 host_to_target_old_sigset(&mask, &oldset);
7917 ret = mask;
7918 }
7919 break;
7920 #endif
7921
7922 #ifdef TARGET_NR_getgid32
7923 case TARGET_NR_getgid32:
7924 ret = get_errno(getgid());
7925 break;
7926 #endif
7927 #ifdef TARGET_NR_geteuid32
7928 case TARGET_NR_geteuid32:
7929 ret = get_errno(geteuid());
7930 break;
7931 #endif
7932 #ifdef TARGET_NR_getegid32
7933 case TARGET_NR_getegid32:
7934 ret = get_errno(getegid());
7935 break;
7936 #endif
7937 #ifdef TARGET_NR_setreuid32
7938 case TARGET_NR_setreuid32:
7939 ret = get_errno(setreuid(arg1, arg2));
7940 break;
7941 #endif
7942 #ifdef TARGET_NR_setregid32
7943 case TARGET_NR_setregid32:
7944 ret = get_errno(setregid(arg1, arg2));
7945 break;
7946 #endif
7947 #ifdef TARGET_NR_getgroups32
7948 case TARGET_NR_getgroups32:
7949 {
7950 int gidsetsize = arg1;
7951 uint32_t *target_grouplist;
7952 gid_t *grouplist;
7953 int i;
7954
7955 grouplist = alloca(gidsetsize * sizeof(gid_t));
7956 ret = get_errno(getgroups(gidsetsize, grouplist));
7957 if (gidsetsize == 0)
7958 break;
7959 if (!is_error(ret)) {
7960 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7961 if (!target_grouplist) {
7962 ret = -TARGET_EFAULT;
7963 goto fail;
7964 }
7965 for(i = 0;i < ret; i++)
7966 target_grouplist[i] = tswap32(grouplist[i]);
7967 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7968 }
7969 }
7970 break;
7971 #endif
7972 #ifdef TARGET_NR_setgroups32
7973 case TARGET_NR_setgroups32:
7974 {
7975 int gidsetsize = arg1;
7976 uint32_t *target_grouplist;
7977 gid_t *grouplist;
7978 int i;
7979
7980 grouplist = alloca(gidsetsize * sizeof(gid_t));
7981 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7982 if (!target_grouplist) {
7983 ret = -TARGET_EFAULT;
7984 goto fail;
7985 }
7986 for(i = 0;i < gidsetsize; i++)
7987 grouplist[i] = tswap32(target_grouplist[i]);
7988 unlock_user(target_grouplist, arg2, 0);
7989 ret = get_errno(setgroups(gidsetsize, grouplist));
7990 }
7991 break;
7992 #endif
7993 #ifdef TARGET_NR_fchown32
7994 case TARGET_NR_fchown32:
7995 ret = get_errno(fchown(arg1, arg2, arg3));
7996 break;
7997 #endif
7998 #ifdef TARGET_NR_setresuid32
7999 case TARGET_NR_setresuid32:
8000 ret = get_errno(setresuid(arg1, arg2, arg3));
8001 break;
8002 #endif
8003 #ifdef TARGET_NR_getresuid32
8004 case TARGET_NR_getresuid32:
8005 {
8006 uid_t ruid, euid, suid;
8007 ret = get_errno(getresuid(&ruid, &euid, &suid));
8008 if (!is_error(ret)) {
8009 if (put_user_u32(ruid, arg1)
8010 || put_user_u32(euid, arg2)
8011 || put_user_u32(suid, arg3))
8012 goto efault;
8013 }
8014 }
8015 break;
8016 #endif
8017 #ifdef TARGET_NR_setresgid32
8018 case TARGET_NR_setresgid32:
8019 ret = get_errno(setresgid(arg1, arg2, arg3));
8020 break;
8021 #endif
8022 #ifdef TARGET_NR_getresgid32
8023 case TARGET_NR_getresgid32:
8024 {
8025 gid_t rgid, egid, sgid;
8026 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8027 if (!is_error(ret)) {
8028 if (put_user_u32(rgid, arg1)
8029 || put_user_u32(egid, arg2)
8030 || put_user_u32(sgid, arg3))
8031 goto efault;
8032 }
8033 }
8034 break;
8035 #endif
8036 #ifdef TARGET_NR_chown32
8037 case TARGET_NR_chown32:
8038 if (!(p = lock_user_string(arg1)))
8039 goto efault;
8040 ret = get_errno(chown(p, arg2, arg3));
8041 unlock_user(p, arg1, 0);
8042 break;
8043 #endif
8044 #ifdef TARGET_NR_setuid32
8045 case TARGET_NR_setuid32:
8046 ret = get_errno(setuid(arg1));
8047 break;
8048 #endif
8049 #ifdef TARGET_NR_setgid32
8050 case TARGET_NR_setgid32:
8051 ret = get_errno(setgid(arg1));
8052 break;
8053 #endif
8054 #ifdef TARGET_NR_setfsuid32
8055 case TARGET_NR_setfsuid32:
8056 ret = get_errno(setfsuid(arg1));
8057 break;
8058 #endif
8059 #ifdef TARGET_NR_setfsgid32
8060 case TARGET_NR_setfsgid32:
8061 ret = get_errno(setfsgid(arg1));
8062 break;
8063 #endif
8064
8065 case TARGET_NR_pivot_root:
8066 goto unimplemented;
8067 #ifdef TARGET_NR_mincore
8068 case TARGET_NR_mincore:
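        /* arg3 is the output vector: one residency byte per page of the
           [arg1, arg1 + arg2) range. */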
8069 {
8070 void *a;
8071 ret = -TARGET_EFAULT;
8072            if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
8073 goto efault;
8074 if (!(p = lock_user_string(arg3)))
8075 goto mincore_fail;
8076 ret = get_errno(mincore(a, arg2, p));
8077 unlock_user(p, arg3, ret);
8078 mincore_fail:
8079 unlock_user(a, arg1, 0);
8080 }
8081 break;
8082 #endif
8083 #ifdef TARGET_NR_arm_fadvise64_64
8084 case TARGET_NR_arm_fadvise64_64:
8085 {
8086 /*
8087 * arm_fadvise64_64 looks like fadvise64_64 but
8088 * with different argument order
8089 */
8090 abi_long temp;
8091 temp = arg3;
8092 arg3 = arg4;
8093 arg4 = temp;
8094 }
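        /* No break: fall through into the common fadvise64 handling below. */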
8095 #endif
8096 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8097 #ifdef TARGET_NR_fadvise64_64
8098 case TARGET_NR_fadvise64_64:
8099 #endif
8100 #ifdef TARGET_NR_fadvise64
8101 case TARGET_NR_fadvise64:
8102 #endif
8103 #ifdef TARGET_S390X
8104 switch (arg4) {
8105 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8106 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8107 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8108 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8109 default: break;
8110 }
8111 #endif
8112        ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
8113 break;
8114 #endif
8115 #ifdef TARGET_NR_madvise
8116 case TARGET_NR_madvise:
8117 /* A straight passthrough may not be safe because qemu sometimes
8118           turns private file-backed mappings into anonymous mappings.
8119 This will break MADV_DONTNEED.
8120 This is a hint, so ignoring and returning success is ok. */
8121 ret = get_errno(0);
8122 break;
8123 #endif
8124 #if TARGET_ABI_BITS == 32
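    /* The fcntl64 locking commands need an explicit struct conversion; ARM
       EABI aligns the 64-bit l_start/l_len members differently from the old
       ABI, hence the separate target_eabi_flock64 layout. */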
8125 case TARGET_NR_fcntl64:
8126 {
8127 int cmd;
8128 struct flock64 fl;
8129 struct target_flock64 *target_fl;
8130 #ifdef TARGET_ARM
8131 struct target_eabi_flock64 *target_efl;
8132 #endif
8133
8134 cmd = target_to_host_fcntl_cmd(arg2);
8135 if (cmd == -TARGET_EINVAL) {
8136 ret = cmd;
8137 break;
8138 }
8139
8140 switch(arg2) {
8141 case TARGET_F_GETLK64:
8142 #ifdef TARGET_ARM
8143 if (((CPUARMState *)cpu_env)->eabi) {
8144 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8145 goto efault;
8146 fl.l_type = tswap16(target_efl->l_type);
8147 fl.l_whence = tswap16(target_efl->l_whence);
8148 fl.l_start = tswap64(target_efl->l_start);
8149 fl.l_len = tswap64(target_efl->l_len);
8150 fl.l_pid = tswap32(target_efl->l_pid);
8151 unlock_user_struct(target_efl, arg3, 0);
8152 } else
8153 #endif
8154 {
8155 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8156 goto efault;
8157 fl.l_type = tswap16(target_fl->l_type);
8158 fl.l_whence = tswap16(target_fl->l_whence);
8159 fl.l_start = tswap64(target_fl->l_start);
8160 fl.l_len = tswap64(target_fl->l_len);
8161 fl.l_pid = tswap32(target_fl->l_pid);
8162 unlock_user_struct(target_fl, arg3, 0);
8163 }
8164 ret = get_errno(fcntl(arg1, cmd, &fl));
8165 if (ret == 0) {
8166 #ifdef TARGET_ARM
8167 if (((CPUARMState *)cpu_env)->eabi) {
8168 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8169 goto efault;
8170 target_efl->l_type = tswap16(fl.l_type);
8171 target_efl->l_whence = tswap16(fl.l_whence);
8172 target_efl->l_start = tswap64(fl.l_start);
8173 target_efl->l_len = tswap64(fl.l_len);
8174 target_efl->l_pid = tswap32(fl.l_pid);
8175 unlock_user_struct(target_efl, arg3, 1);
8176 } else
8177 #endif
8178 {
8179 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8180 goto efault;
8181 target_fl->l_type = tswap16(fl.l_type);
8182 target_fl->l_whence = tswap16(fl.l_whence);
8183 target_fl->l_start = tswap64(fl.l_start);
8184 target_fl->l_len = tswap64(fl.l_len);
8185 target_fl->l_pid = tswap32(fl.l_pid);
8186 unlock_user_struct(target_fl, arg3, 1);
8187 }
8188 }
8189 break;
8190
8191 case TARGET_F_SETLK64:
8192 case TARGET_F_SETLKW64:
8193 #ifdef TARGET_ARM
8194 if (((CPUARMState *)cpu_env)->eabi) {
8195 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8196 goto efault;
8197 fl.l_type = tswap16(target_efl->l_type);
8198 fl.l_whence = tswap16(target_efl->l_whence);
8199 fl.l_start = tswap64(target_efl->l_start);
8200 fl.l_len = tswap64(target_efl->l_len);
8201 fl.l_pid = tswap32(target_efl->l_pid);
8202 unlock_user_struct(target_efl, arg3, 0);
8203 } else
8204 #endif
8205 {
8206 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8207 goto efault;
8208 fl.l_type = tswap16(target_fl->l_type);
8209 fl.l_whence = tswap16(target_fl->l_whence);
8210 fl.l_start = tswap64(target_fl->l_start);
8211 fl.l_len = tswap64(target_fl->l_len);
8212 fl.l_pid = tswap32(target_fl->l_pid);
8213 unlock_user_struct(target_fl, arg3, 0);
8214 }
8215 ret = get_errno(fcntl(arg1, cmd, &fl));
8216 break;
8217 default:
8218 ret = do_fcntl(arg1, arg2, arg3);
8219 break;
8220 }
8221 break;
8222 }
8223 #endif
8224 #ifdef TARGET_NR_cacheflush
8225 case TARGET_NR_cacheflush:
8226 /* self-modifying code is handled automatically, so nothing needed */
8227 ret = 0;
8228 break;
8229 #endif
8230 #ifdef TARGET_NR_security
8231 case TARGET_NR_security:
8232 goto unimplemented;
8233 #endif
8234 #ifdef TARGET_NR_getpagesize
8235 case TARGET_NR_getpagesize:
8236 ret = TARGET_PAGE_SIZE;
8237 break;
8238 #endif
8239 case TARGET_NR_gettid:
8240 ret = get_errno(gettid());
8241 break;
8242 #ifdef TARGET_NR_readahead
8243 case TARGET_NR_readahead:
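        /* On 32-bit ABIs that pass 64-bit arguments in aligned register
           pairs, a padding slot shifts the offset and count up by one
           argument. */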
8244 #if TARGET_ABI_BITS == 32
8245 if (regpairs_aligned(cpu_env)) {
8246 arg2 = arg3;
8247 arg3 = arg4;
8248 arg4 = arg5;
8249 }
8250 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8251 #else
8252 ret = get_errno(readahead(arg1, arg2, arg3));
8253 #endif
8254 break;
8255 #endif
8256 #ifdef CONFIG_ATTR
8257 #ifdef TARGET_NR_setxattr
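    /* A zero list pointer is a valid way to query the required buffer size,
       so the guest buffer is only locked when one was supplied. */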
8258 case TARGET_NR_listxattr:
8259 case TARGET_NR_llistxattr:
8260 {
8261 void *p, *b = 0;
8262 if (arg2) {
8263 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8264 if (!b) {
8265 ret = -TARGET_EFAULT;
8266 break;
8267 }
8268 }
8269 p = lock_user_string(arg1);
8270 if (p) {
8271 if (num == TARGET_NR_listxattr) {
8272 ret = get_errno(listxattr(p, b, arg3));
8273 } else {
8274 ret = get_errno(llistxattr(p, b, arg3));
8275 }
8276 } else {
8277 ret = -TARGET_EFAULT;
8278 }
8279 unlock_user(p, arg1, 0);
8280 unlock_user(b, arg2, arg3);
8281 break;
8282 }
8283 case TARGET_NR_flistxattr:
8284 {
8285 void *b = 0;
8286 if (arg2) {
8287 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8288 if (!b) {
8289 ret = -TARGET_EFAULT;
8290 break;
8291 }
8292 }
8293 ret = get_errno(flistxattr(arg1, b, arg3));
8294 unlock_user(b, arg2, arg3);
8295 break;
8296 }
8297 case TARGET_NR_setxattr:
8298 case TARGET_NR_lsetxattr:
8299 {
8300 void *p, *n, *v = 0;
8301 if (arg3) {
8302 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8303 if (!v) {
8304 ret = -TARGET_EFAULT;
8305 break;
8306 }
8307 }
8308 p = lock_user_string(arg1);
8309 n = lock_user_string(arg2);
8310 if (p && n) {
8311 if (num == TARGET_NR_setxattr) {
8312 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8313 } else {
8314 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8315 }
8316 } else {
8317 ret = -TARGET_EFAULT;
8318 }
8319 unlock_user(p, arg1, 0);
8320 unlock_user(n, arg2, 0);
8321 unlock_user(v, arg3, 0);
8322 }
8323 break;
8324 case TARGET_NR_fsetxattr:
8325 {
8326 void *n, *v = 0;
8327 if (arg3) {
8328 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8329 if (!v) {
8330 ret = -TARGET_EFAULT;
8331 break;
8332 }
8333 }
8334 n = lock_user_string(arg2);
8335 if (n) {
8336 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8337 } else {
8338 ret = -TARGET_EFAULT;
8339 }
8340 unlock_user(n, arg2, 0);
8341 unlock_user(v, arg3, 0);
8342 }
8343 break;
8344 case TARGET_NR_getxattr:
8345 case TARGET_NR_lgetxattr:
8346 {
8347 void *p, *n, *v = 0;
8348 if (arg3) {
8349 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8350 if (!v) {
8351 ret = -TARGET_EFAULT;
8352 break;
8353 }
8354 }
8355 p = lock_user_string(arg1);
8356 n = lock_user_string(arg2);
8357 if (p && n) {
8358 if (num == TARGET_NR_getxattr) {
8359 ret = get_errno(getxattr(p, n, v, arg4));
8360 } else {
8361 ret = get_errno(lgetxattr(p, n, v, arg4));
8362 }
8363 } else {
8364 ret = -TARGET_EFAULT;
8365 }
8366 unlock_user(p, arg1, 0);
8367 unlock_user(n, arg2, 0);
8368 unlock_user(v, arg3, arg4);
8369 }
8370 break;
8371 case TARGET_NR_fgetxattr:
8372 {
8373 void *n, *v = 0;
8374 if (arg3) {
8375 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8376 if (!v) {
8377 ret = -TARGET_EFAULT;
8378 break;
8379 }
8380 }
8381 n = lock_user_string(arg2);
8382 if (n) {
8383 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8384 } else {
8385 ret = -TARGET_EFAULT;
8386 }
8387 unlock_user(n, arg2, 0);
8388 unlock_user(v, arg3, arg4);
8389 }
8390 break;
8391 case TARGET_NR_removexattr:
8392 case TARGET_NR_lremovexattr:
8393 {
8394 void *p, *n;
8395 p = lock_user_string(arg1);
8396 n = lock_user_string(arg2);
8397 if (p && n) {
8398 if (num == TARGET_NR_removexattr) {
8399 ret = get_errno(removexattr(p, n));
8400 } else {
8401 ret = get_errno(lremovexattr(p, n));
8402 }
8403 } else {
8404 ret = -TARGET_EFAULT;
8405 }
8406 unlock_user(p, arg1, 0);
8407 unlock_user(n, arg2, 0);
8408 }
8409 break;
8410 case TARGET_NR_fremovexattr:
8411 {
8412 void *n;
8413 n = lock_user_string(arg2);
8414 if (n) {
8415 ret = get_errno(fremovexattr(arg1, n));
8416 } else {
8417 ret = -TARGET_EFAULT;
8418 }
8419 unlock_user(n, arg2, 0);
8420 }
8421 break;
8422 #endif
8423 #endif /* CONFIG_ATTR */
8424 #ifdef TARGET_NR_set_thread_area
8425 case TARGET_NR_set_thread_area:
8426 #if defined(TARGET_MIPS)
8427 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8428 ret = 0;
8429 break;
8430 #elif defined(TARGET_CRIS)
8431 if (arg1 & 0xff)
8432 ret = -TARGET_EINVAL;
8433 else {
8434 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8435 ret = 0;
8436 }
8437 break;
8438 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8439 ret = do_set_thread_area(cpu_env, arg1);
8440 break;
8441 #else
8442 goto unimplemented_nowarn;
8443 #endif
8444 #endif
8445 #ifdef TARGET_NR_get_thread_area
8446 case TARGET_NR_get_thread_area:
8447 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8448        ret = do_get_thread_area(cpu_env, arg1);
        break;
8449 #else
8450 goto unimplemented_nowarn;
8451 #endif
8452 #endif
8453 #ifdef TARGET_NR_getdomainname
8454 case TARGET_NR_getdomainname:
8455 goto unimplemented_nowarn;
8456 #endif
8457
8458 #ifdef TARGET_NR_clock_gettime
8459 case TARGET_NR_clock_gettime:
8460 {
8461 struct timespec ts;
8462 ret = get_errno(clock_gettime(arg1, &ts));
8463 if (!is_error(ret)) {
8464 host_to_target_timespec(arg2, &ts);
8465 }
8466 break;
8467 }
8468 #endif
8469 #ifdef TARGET_NR_clock_getres
8470 case TARGET_NR_clock_getres:
8471 {
8472 struct timespec ts;
8473 ret = get_errno(clock_getres(arg1, &ts));
8474 if (!is_error(ret)) {
8475 host_to_target_timespec(arg2, &ts);
8476 }
8477 break;
8478 }
8479 #endif
8480 #ifdef TARGET_NR_clock_nanosleep
8481 case TARGET_NR_clock_nanosleep:
8482 {
8483 struct timespec ts;
8484 target_to_host_timespec(&ts, arg3);
8485 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8486 if (arg4)
8487 host_to_target_timespec(arg4, &ts);
8488 break;
8489 }
8490 #endif
8491
8492 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8493 case TARGET_NR_set_tid_address:
8494 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8495 break;
8496 #endif
8497
8498 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8499 case TARGET_NR_tkill:
8500 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8501 break;
8502 #endif
8503
8504 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8505 case TARGET_NR_tgkill:
8506 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8507 target_to_host_signal(arg3)));
8508 break;
8509 #endif
8510
8511 #ifdef TARGET_NR_set_robust_list
8512 case TARGET_NR_set_robust_list:
8513 goto unimplemented_nowarn;
8514 #endif
8515
8516 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8517 case TARGET_NR_utimensat:
8518 {
8519 struct timespec *tsp, ts[2];
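        /* A NULL times pointer means 'set both timestamps to the current
           time'. */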
8520 if (!arg3) {
8521 tsp = NULL;
8522 } else {
8523 target_to_host_timespec(ts, arg3);
8524 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8525 tsp = ts;
8526 }
8527 if (!arg2)
8528 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8529 else {
8530 if (!(p = lock_user_string(arg2))) {
8531 ret = -TARGET_EFAULT;
8532 goto fail;
8533 }
8534 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8535 unlock_user(p, arg2, 0);
8536 }
8537 }
8538 break;
8539 #endif
8540 #if defined(CONFIG_USE_NPTL)
8541 case TARGET_NR_futex:
8542 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8543 break;
8544 #endif
8545 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8546 case TARGET_NR_inotify_init:
8547 ret = get_errno(sys_inotify_init());
8548 break;
8549 #endif
8550 #ifdef CONFIG_INOTIFY1
8551 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8552 case TARGET_NR_inotify_init1:
8553 ret = get_errno(sys_inotify_init1(arg1));
8554 break;
8555 #endif
8556 #endif
8557 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8558 case TARGET_NR_inotify_add_watch:
8559 p = lock_user_string(arg2);
8560 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8561 unlock_user(p, arg2, 0);
8562 break;
8563 #endif
8564 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8565 case TARGET_NR_inotify_rm_watch:
8566 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8567 break;
8568 #endif
8569
8570 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8571 case TARGET_NR_mq_open:
8572 {
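            /* The guest C library strips the leading '/' from the queue name
               before making the syscall; back up one byte so the host
               mq_open() wrapper (which requires the '/') sees the full name. */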
8573            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr = NULL;
8574
8575            p = lock_user_string(arg1 - 1);
8576            if (arg4 != 0) {
8577                copy_from_user_mq_attr(&posix_mq_attr, arg4);
                pposix_mq_attr = &posix_mq_attr;
            }
8578            ret = get_errno(mq_open(p, arg2, arg3, pposix_mq_attr));
8579            unlock_user (p, arg1, 0);
8580 }
8581 break;
8582
8583 case TARGET_NR_mq_unlink:
8584 p = lock_user_string(arg1 - 1);
8585 ret = get_errno(mq_unlink(p));
8586 unlock_user (p, arg1, 0);
8587 break;
8588
8589 case TARGET_NR_mq_timedsend:
8590 {
8591 struct timespec ts;
8592
8593 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8594 if (arg5 != 0) {
8595 target_to_host_timespec(&ts, arg5);
8596 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8597 host_to_target_timespec(arg5, &ts);
8598 }
8599 else
8600 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8601 unlock_user (p, arg2, arg3);
8602 }
8603 break;
8604
8605 case TARGET_NR_mq_timedreceive:
8606 {
8607 struct timespec ts;
8608 unsigned int prio;
8609
8610 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8611 if (arg5 != 0) {
8612 target_to_host_timespec(&ts, arg5);
8613 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8614 host_to_target_timespec(arg5, &ts);
8615 }
8616 else
8617 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8618 unlock_user (p, arg2, arg3);
8619 if (arg4 != 0)
8620 put_user_u32(prio, arg4);
8621 }
8622 break;
8623
8624 /* Not implemented for now... */
8625 /* case TARGET_NR_mq_notify: */
8626 /* break; */
8627
8628 case TARGET_NR_mq_getsetattr:
8629 {
8630 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8631 ret = 0;
8632 if (arg3 != 0) {
8633 ret = mq_getattr(arg1, &posix_mq_attr_out);
8634 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8635 }
8636 if (arg2 != 0) {
8637 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8638 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8639 }
8640
8641 }
8642 break;
8643 #endif
8644
8645 #ifdef CONFIG_SPLICE
8646 #ifdef TARGET_NR_tee
8647 case TARGET_NR_tee:
8648 {
8649            ret = get_errno(tee(arg1, arg2, arg3, arg4));
8650 }
8651 break;
8652 #endif
8653 #ifdef TARGET_NR_splice
8654 case TARGET_NR_splice:
8655 {
8656 loff_t loff_in, loff_out;
8657 loff_t *ploff_in = NULL, *ploff_out = NULL;
8658            if (arg2) {
8659                get_user_u64(loff_in, arg2);
8660                ploff_in = &loff_in;
8661            }
8662            if (arg4) {
8663                get_user_u64(loff_out, arg4);
8664 ploff_out = &loff_out;
8665 }
8666 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8667 }
8668 break;
8669 #endif
8670 #ifdef TARGET_NR_vmsplice
8671 case TARGET_NR_vmsplice:
8672 {
8673 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8674 if (vec != NULL) {
8675 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8676 unlock_iovec(vec, arg2, arg3, 0);
8677 } else {
8678 ret = -host_to_target_errno(errno);
8679 }
8680 }
8681 break;
8682 #endif
8683 #endif /* CONFIG_SPLICE */
8684 #ifdef CONFIG_EVENTFD
8685 #if defined(TARGET_NR_eventfd)
8686 case TARGET_NR_eventfd:
8687 ret = get_errno(eventfd(arg1, 0));
8688 break;
8689 #endif
8690 #if defined(TARGET_NR_eventfd2)
8691 case TARGET_NR_eventfd2:
8692 ret = get_errno(eventfd(arg1, arg2));
8693 break;
8694 #endif
8695 #endif /* CONFIG_EVENTFD */
8696 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8697 case TARGET_NR_fallocate:
8698 #if TARGET_ABI_BITS == 32
8699 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8700 target_offset64(arg5, arg6)));
8701 #else
8702 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8703 #endif
8704 break;
8705 #endif
8706 #if defined(CONFIG_SYNC_FILE_RANGE)
8707 #if defined(TARGET_NR_sync_file_range)
8708 case TARGET_NR_sync_file_range:
8709 #if TARGET_ABI_BITS == 32
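    /* MIPS o32 passes 64-bit arguments in aligned register pairs, so a pad
       slot pushes the offsets up to arg3/arg4. */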
8710 #if defined(TARGET_MIPS)
8711 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8712 target_offset64(arg5, arg6), arg7));
8713 #else
8714 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8715 target_offset64(arg4, arg5), arg6));
8716 #endif /* !TARGET_MIPS */
8717 #else
8718 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8719 #endif
8720 break;
8721 #endif
8722 #if defined(TARGET_NR_sync_file_range2)
8723 case TARGET_NR_sync_file_range2:
8724 /* This is like sync_file_range but the arguments are reordered */
8725 #if TARGET_ABI_BITS == 32
8726 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8727 target_offset64(arg5, arg6), arg2));
8728 #else
8729 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8730 #endif
8731 break;
8732 #endif
8733 #endif
8734 #if defined(CONFIG_EPOLL)
8735 #if defined(TARGET_NR_epoll_create)
8736 case TARGET_NR_epoll_create:
8737 ret = get_errno(epoll_create(arg1));
8738 break;
8739 #endif
8740 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8741 case TARGET_NR_epoll_create1:
8742 ret = get_errno(epoll_create1(arg1));
8743 break;
8744 #endif
8745 #if defined(TARGET_NR_epoll_ctl)
8746 case TARGET_NR_epoll_ctl:
8747 {
8748 struct epoll_event ep;
8749 struct epoll_event *epp = 0;
8750 if (arg4) {
8751 struct target_epoll_event *target_ep;
8752 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8753 goto efault;
8754 }
8755 ep.events = tswap32(target_ep->events);
8756 /* The epoll_data_t union is just opaque data to the kernel,
8757 * so we transfer all 64 bits across and need not worry what
8758 * actual data type it is.
8759 */
8760 ep.data.u64 = tswap64(target_ep->data.u64);
8761 unlock_user_struct(target_ep, arg4, 0);
8762 epp = &ep;
8763 }
8764 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8765 break;
8766 }
8767 #endif
8768
8769 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8770 #define IMPLEMENT_EPOLL_PWAIT
8771 #endif
8772 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8773 #if defined(TARGET_NR_epoll_wait)
8774 case TARGET_NR_epoll_wait:
8775 #endif
8776 #if defined(IMPLEMENT_EPOLL_PWAIT)
8777 case TARGET_NR_epoll_pwait:
8778 #endif
8779 {
8780 struct target_epoll_event *target_ep;
8781 struct epoll_event *ep;
8782 int epfd = arg1;
8783 int maxevents = arg3;
8784 int timeout = arg4;
8785
8786 target_ep = lock_user(VERIFY_WRITE, arg2,
8787 maxevents * sizeof(struct target_epoll_event), 1);
8788 if (!target_ep) {
8789 goto efault;
8790 }
8791
8792 ep = alloca(maxevents * sizeof(struct epoll_event));
8793
8794 switch (num) {
8795 #if defined(IMPLEMENT_EPOLL_PWAIT)
8796 case TARGET_NR_epoll_pwait:
8797 {
8798 target_sigset_t *target_set;
8799 sigset_t _set, *set = &_set;
8800
8801 if (arg5) {
8802 target_set = lock_user(VERIFY_READ, arg5,
8803 sizeof(target_sigset_t), 1);
8804 if (!target_set) {
8805 unlock_user(target_ep, arg2, 0);
8806 goto efault;
8807 }
8808 target_to_host_sigset(set, target_set);
8809 unlock_user(target_set, arg5, 0);
8810 } else {
8811 set = NULL;
8812 }
8813
8814 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8815 break;
8816 }
8817 #endif
8818 #if defined(TARGET_NR_epoll_wait)
8819 case TARGET_NR_epoll_wait:
8820 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8821 break;
8822 #endif
8823 default:
8824 ret = -TARGET_ENOSYS;
8825 }
8826 if (!is_error(ret)) {
8827 int i;
8828 for (i = 0; i < ret; i++) {
8829 target_ep[i].events = tswap32(ep[i].events);
8830 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8831 }
8832 }
8833 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8834 break;
8835 }
8836 #endif
8837 #endif
8838 #ifdef TARGET_NR_prlimit64
8839 case TARGET_NR_prlimit64:
8840 {
8841 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8842 struct target_rlimit64 *target_rnew, *target_rold;
8843 struct host_rlimit64 rnew, rold, *rnewp = 0;
8844 if (arg3) {
8845 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8846 goto efault;
8847 }
8848 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8849 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8850 unlock_user_struct(target_rnew, arg3, 0);
8851 rnewp = &rnew;
8852 }
8853
8854 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8855 if (!is_error(ret) && arg4) {
8856 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8857 goto efault;
8858 }
8859 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8860 target_rold->rlim_max = tswap64(rold.rlim_max);
8861 unlock_user_struct(target_rold, arg4, 1);
8862 }
8863 break;
8864 }
8865 #endif
8866 #ifdef TARGET_NR_gethostname
8867 case TARGET_NR_gethostname:
8868 {
8869 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8870 if (name) {
8871 ret = get_errno(gethostname(name, arg2));
8872 unlock_user(name, arg1, arg2);
8873 } else {
8874 ret = -TARGET_EFAULT;
8875 }
8876 break;
8877 }
8878 #endif
8879 default:
8880 unimplemented:
8881 gemu_log("qemu: Unsupported syscall: %d\n", num);
8882 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8883 unimplemented_nowarn:
8884 #endif
8885 ret = -TARGET_ENOSYS;
8886 break;
8887 }
8888 fail:
8889 #ifdef DEBUG
8890 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8891 #endif
8892 if(do_strace)
8893 print_syscall_ret(num, ret);
8894 return ret;
8895 efault:
8896 ret = -TARGET_EFAULT;
8897 goto fail;
8898 }