1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include "qemu-common.h"
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
73 #ifdef CONFIG_ATTR
74 #include "qemu-xattr.h"
75 #endif
76
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
83
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/utsname.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include "linux_loop.h"
100 #include "cpu-uname.h"
101
102 #include "qemu.h"
103
104 #if defined(CONFIG_USE_NPTL)
105 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
106 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
107 #else
108 /* XXX: Hardcode the above values. */
109 #define CLONE_NPTL_FLAGS2 0
110 #endif
111
112 //#define DEBUG
113
114 //#include <linux/msdos_fs.h>
115 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
116 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
117
118
119 #undef _syscall0
120 #undef _syscall1
121 #undef _syscall2
122 #undef _syscall3
123 #undef _syscall4
124 #undef _syscall5
125 #undef _syscall6
126
127 #define _syscall0(type,name) \
128 static type name (void) \
129 { \
130 return syscall(__NR_##name); \
131 }
132
133 #define _syscall1(type,name,type1,arg1) \
134 static type name (type1 arg1) \
135 { \
136 return syscall(__NR_##name, arg1); \
137 }
138
139 #define _syscall2(type,name,type1,arg1,type2,arg2) \
140 static type name (type1 arg1,type2 arg2) \
141 { \
142 return syscall(__NR_##name, arg1, arg2); \
143 }
144
145 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
146 static type name (type1 arg1,type2 arg2,type3 arg3) \
147 { \
148 return syscall(__NR_##name, arg1, arg2, arg3); \
149 }
150
151 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
152 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
153 { \
154 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
155 }
156
157 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
158 type5,arg5) \
159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
160 { \
161 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
162 }
163
164
165 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
166 type5,arg5,type6,arg6) \
167 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
168 type6 arg6) \
169 { \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
171 }
172
173
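/*
 * For illustration: combined with the __NR_sys_* aliases below, these
 * macros generate thin static wrappers around raw syscall numbers.  For
 * example,
 *
 *     _syscall3(int, sys_getdents, uint, fd,
 *               struct linux_dirent *, dirp, uint, count)
 *
 * expands (after the __NR_sys_getdents alias is resolved) to roughly
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_getdents, fd, dirp, count);
 *     }
 *
 * which lets the emulator invoke host syscalls that glibc either does not
 * wrap or wraps with different semantics.
 */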
174 #define __NR_sys_uname __NR_uname
175 #define __NR_sys_faccessat __NR_faccessat
176 #define __NR_sys_fchmodat __NR_fchmodat
177 #define __NR_sys_fchownat __NR_fchownat
178 #define __NR_sys_fstatat64 __NR_fstatat64
179 #define __NR_sys_futimesat __NR_futimesat
180 #define __NR_sys_getcwd1 __NR_getcwd
181 #define __NR_sys_getdents __NR_getdents
182 #define __NR_sys_getdents64 __NR_getdents64
183 #define __NR_sys_getpriority __NR_getpriority
184 #define __NR_sys_linkat __NR_linkat
185 #define __NR_sys_mkdirat __NR_mkdirat
186 #define __NR_sys_mknodat __NR_mknodat
187 #define __NR_sys_newfstatat __NR_newfstatat
188 #define __NR_sys_openat __NR_openat
189 #define __NR_sys_readlinkat __NR_readlinkat
190 #define __NR_sys_renameat __NR_renameat
191 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
192 #define __NR_sys_symlinkat __NR_symlinkat
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_tgkill __NR_tgkill
195 #define __NR_sys_tkill __NR_tkill
196 #define __NR_sys_unlinkat __NR_unlinkat
197 #define __NR_sys_utimensat __NR_utimensat
198 #define __NR_sys_futex __NR_futex
199 #define __NR_sys_inotify_init __NR_inotify_init
200 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
201 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
202
203 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
204 defined(__s390x__)
205 #define __NR__llseek __NR_lseek
206 #endif
207
208 #ifdef __NR_gettid
209 _syscall0(int, gettid)
210 #else
211 /* This is a replacement for the host gettid() and must return a host
212 errno. */
213 static int gettid(void) {
214 return -ENOSYS;
215 }
216 #endif
217 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
218 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
220 #endif
221 _syscall2(int, sys_getpriority, int, which, int, who);
222 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
223 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
224 loff_t *, res, uint, wh);
225 #endif
226 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
227 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
228 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
229 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
230 #endif
231 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
232 _syscall2(int,sys_tkill,int,tid,int,sig)
233 #endif
234 #ifdef __NR_exit_group
235 _syscall1(int,exit_group,int,error_code)
236 #endif
237 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
238 _syscall1(int,set_tid_address,int *,tidptr)
239 #endif
240 #if defined(CONFIG_USE_NPTL)
241 #if defined(TARGET_NR_futex) && defined(__NR_futex)
242 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
243 const struct timespec *,timeout,int *,uaddr2,int,val3)
244 #endif
245 #endif
246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
247 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
250 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
251 unsigned long *, user_mask_ptr);
252 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
253 void *, arg);
254
255 static bitmask_transtbl fcntl_flags_tbl[] = {
256 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
257 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
258 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
259 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
260 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
261 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
262 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
263 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
264 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
265 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
266 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
267 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
268 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
269 #if defined(O_DIRECT)
270 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
271 #endif
272 #if defined(O_NOATIME)
273 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
274 #endif
275 #if defined(O_CLOEXEC)
276 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
277 #endif
278 #if defined(O_PATH)
279 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
280 #endif
281 /* Don't terminate the list prematurely on 64-bit host+guest. */
282 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
283 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
284 #endif
285 { 0, 0, 0, 0 }
286 };
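
/*
 * Each row maps a (target mask, target bits) pair to the corresponding
 * (host mask, host bits) pair.  The open()/fcntl() handlers later in this
 * file feed guest flag words through the generic bitmask converters,
 * roughly:
 *
 *     host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *     target_flags = host_to_target_bitmask(host_flags, fcntl_flags_tbl);
 *
 * so a guest O_NONBLOCK (whatever its numeric value in the target ABI)
 * becomes the host's O_NONBLOCK before the real syscall runs, and flags
 * read back via F_GETFL are translated in the opposite direction.
 */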
287
288 #define COPY_UTSNAME_FIELD(dest, src) \
289 do { \
290 /* __NEW_UTS_LEN doesn't include terminating null */ \
291 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
292 (dest)[__NEW_UTS_LEN] = '\0'; \
293 } while (0)
294
295 static int sys_uname(struct new_utsname *buf)
296 {
297 struct utsname uts_buf;
298
299 if (uname(&uts_buf) < 0)
300 return (-1);
301
302 /*
303 * Just in case these have some differences, we
304  * translate utsname to new_utsname (which is the
305  * struct the Linux kernel uses).
306 */
307
308 memset(buf, 0, sizeof(*buf));
309 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
310 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
311 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
312 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
313 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
314 #ifdef _GNU_SOURCE
315 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
316 #endif
317 return (0);
318
319 #undef COPY_UTSNAME_FIELD
320 }
321
322 static int sys_getcwd1(char *buf, size_t size)
323 {
324 if (getcwd(buf, size) == NULL) {
325 /* getcwd() sets errno */
326 return (-1);
327 }
328 return strlen(buf)+1;
329 }
330
331 #ifdef CONFIG_ATFILE
332 /*
333  * The host system seems to have the atfile syscall stubs available. We
334  * enable them one by one as specified by the target's syscall_nr.h.
335 */
336
337 #ifdef TARGET_NR_faccessat
338 static int sys_faccessat(int dirfd, const char *pathname, int mode)
339 {
340 return (faccessat(dirfd, pathname, mode, 0));
341 }
342 #endif
343 #ifdef TARGET_NR_fchmodat
344 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
345 {
346 return (fchmodat(dirfd, pathname, mode, 0));
347 }
348 #endif
349 #if defined(TARGET_NR_fchownat)
350 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
351 gid_t group, int flags)
352 {
353 return (fchownat(dirfd, pathname, owner, group, flags));
354 }
355 #endif
356 #ifdef __NR_fstatat64
357 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
358 int flags)
359 {
360 return (fstatat(dirfd, pathname, buf, flags));
361 }
362 #endif
363 #ifdef __NR_newfstatat
364 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
365 int flags)
366 {
367 return (fstatat(dirfd, pathname, buf, flags));
368 }
369 #endif
370 #ifdef TARGET_NR_futimesat
371 static int sys_futimesat(int dirfd, const char *pathname,
372 const struct timeval times[2])
373 {
374 return (futimesat(dirfd, pathname, times));
375 }
376 #endif
377 #ifdef TARGET_NR_linkat
378 static int sys_linkat(int olddirfd, const char *oldpath,
379 int newdirfd, const char *newpath, int flags)
380 {
381 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
382 }
383 #endif
384 #ifdef TARGET_NR_mkdirat
385 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
386 {
387 return (mkdirat(dirfd, pathname, mode));
388 }
389 #endif
390 #ifdef TARGET_NR_mknodat
391 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
392 dev_t dev)
393 {
394 return (mknodat(dirfd, pathname, mode, dev));
395 }
396 #endif
397 #ifdef TARGET_NR_openat
398 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
399 {
400 /*
401 * open(2) has extra parameter 'mode' when called with
402 * flag O_CREAT.
403 */
404 if ((flags & O_CREAT) != 0) {
405 return (openat(dirfd, pathname, flags, mode));
406 }
407 return (openat(dirfd, pathname, flags));
408 }
409 #endif
410 #ifdef TARGET_NR_readlinkat
411 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
412 {
413 return (readlinkat(dirfd, pathname, buf, bufsiz));
414 }
415 #endif
416 #ifdef TARGET_NR_renameat
417 static int sys_renameat(int olddirfd, const char *oldpath,
418 int newdirfd, const char *newpath)
419 {
420 return (renameat(olddirfd, oldpath, newdirfd, newpath));
421 }
422 #endif
423 #ifdef TARGET_NR_symlinkat
424 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
425 {
426 return (symlinkat(oldpath, newdirfd, newpath));
427 }
428 #endif
429 #ifdef TARGET_NR_unlinkat
430 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
431 {
432 return (unlinkat(dirfd, pathname, flags));
433 }
434 #endif
435 #else /* !CONFIG_ATFILE */
436
437 /*
438 * Try direct syscalls instead
439 */
440 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
441 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
442 #endif
443 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
444 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
445 #endif
446 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
447 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
448 uid_t,owner,gid_t,group,int,flags)
449 #endif
450 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
451 defined(__NR_fstatat64)
452 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
453 struct stat *,buf,int,flags)
454 #endif
455 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
456 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
457 const struct timeval *,times)
458 #endif
459 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
460 defined(__NR_newfstatat)
461 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
462 struct stat *,buf,int,flags)
463 #endif
464 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
465 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
466 int,newdirfd,const char *,newpath,int,flags)
467 #endif
468 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
469 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
470 #endif
471 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
472 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
473 mode_t,mode,dev_t,dev)
474 #endif
475 #if defined(TARGET_NR_openat) && defined(__NR_openat)
476 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
477 #endif
478 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
479 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
480 char *,buf,size_t,bufsize)
481 #endif
482 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
483 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
484 int,newdirfd,const char *,newpath)
485 #endif
486 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
487 _syscall3(int,sys_symlinkat,const char *,oldpath,
488 int,newdirfd,const char *,newpath)
489 #endif
490 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
491 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
492 #endif
493
494 #endif /* CONFIG_ATFILE */
495
496 #ifdef CONFIG_UTIMENSAT
497 static int sys_utimensat(int dirfd, const char *pathname,
498 const struct timespec times[2], int flags)
499 {
500 if (pathname == NULL)
501 return futimens(dirfd, times);
502 else
503 return utimensat(dirfd, pathname, times, flags);
504 }
505 #else
506 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
507 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
508 const struct timespec *,tsp,int,flags)
509 #endif
510 #endif /* CONFIG_UTIMENSAT */
511
512 #ifdef CONFIG_INOTIFY
513 #include <sys/inotify.h>
514
515 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
516 static int sys_inotify_init(void)
517 {
518 return (inotify_init());
519 }
520 #endif
521 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
522 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
523 {
524 return (inotify_add_watch(fd, pathname, mask));
525 }
526 #endif
527 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
528 static int sys_inotify_rm_watch(int fd, int32_t wd)
529 {
530 return (inotify_rm_watch(fd, wd));
531 }
532 #endif
533 #ifdef CONFIG_INOTIFY1
534 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
535 static int sys_inotify_init1(int flags)
536 {
537 return (inotify_init1(flags));
538 }
539 #endif
540 #endif
541 #else
542 /* Userspace can usually survive runtime without inotify */
543 #undef TARGET_NR_inotify_init
544 #undef TARGET_NR_inotify_init1
545 #undef TARGET_NR_inotify_add_watch
546 #undef TARGET_NR_inotify_rm_watch
547 #endif /* CONFIG_INOTIFY */
548
549 #if defined(TARGET_NR_ppoll)
550 #ifndef __NR_ppoll
551 # define __NR_ppoll -1
552 #endif
553 #define __NR_sys_ppoll __NR_ppoll
554 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
555 struct timespec *, timeout, const __sigset_t *, sigmask,
556 size_t, sigsetsize)
557 #endif
558
559 #if defined(TARGET_NR_pselect6)
560 #ifndef __NR_pselect6
561 # define __NR_pselect6 -1
562 #endif
563 #define __NR_sys_pselect6 __NR_pselect6
564 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
565 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
566 #endif
567
568 #if defined(TARGET_NR_prlimit64)
569 #ifndef __NR_prlimit64
570 # define __NR_prlimit64 -1
571 #endif
572 #define __NR_sys_prlimit64 __NR_prlimit64
573 /* The glibc rlimit structure may not match the one used by the underlying syscall */
574 struct host_rlimit64 {
575 uint64_t rlim_cur;
576 uint64_t rlim_max;
577 };
578 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
579 const struct host_rlimit64 *, new_limit,
580 struct host_rlimit64 *, old_limit)
581 #endif
582
583 extern int personality(int);
584 extern int flock(int, int);
585 extern int setfsuid(int);
586 extern int setfsgid(int);
587 extern int setgroups(int, gid_t *);
588
589 /* ARM EABI and MIPS expect 64-bit types aligned even on pairs of registers */
590 #ifdef TARGET_ARM
591 static inline int regpairs_aligned(void *cpu_env) {
592 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
593 }
594 #elif defined(TARGET_MIPS)
595 static inline int regpairs_aligned(void *cpu_env) { return 1; }
596 #else
597 static inline int regpairs_aligned(void *cpu_env) { return 0; }
598 #endif
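
/*
 * Sketch of how regpairs_aligned() is used: on 32-bit ABIs a 64-bit
 * syscall argument is split across two registers, and ARM EABI and MIPS
 * additionally require the pair to start on an even register, which
 * inserts a padding slot.  Handlers for calls such as truncate64 therefore
 * shift their arguments before reassembling the 64-bit value, roughly:
 *
 *     if (regpairs_aligned(cpu_env)) {
 *         arg2 = arg3;    // skip the padding register
 *         arg3 = arg4;
 *     }
 *     return get_errno(truncate64(path, target_offset64(arg2, arg3)));
 */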
599
600 #define ERRNO_TABLE_SIZE 1200
601
602 /* target_to_host_errno_table[] is initialized from
603 * host_to_target_errno_table[] in syscall_init(). */
604 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
605 };
606
607 /*
608 * This list is the union of errno values overridden in asm-<arch>/errno.h
609 * minus the errnos that are not actually generic to all archs.
610 */
611 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
612 [EIDRM] = TARGET_EIDRM,
613 [ECHRNG] = TARGET_ECHRNG,
614 [EL2NSYNC] = TARGET_EL2NSYNC,
615 [EL3HLT] = TARGET_EL3HLT,
616 [EL3RST] = TARGET_EL3RST,
617 [ELNRNG] = TARGET_ELNRNG,
618 [EUNATCH] = TARGET_EUNATCH,
619 [ENOCSI] = TARGET_ENOCSI,
620 [EL2HLT] = TARGET_EL2HLT,
621 [EDEADLK] = TARGET_EDEADLK,
622 [ENOLCK] = TARGET_ENOLCK,
623 [EBADE] = TARGET_EBADE,
624 [EBADR] = TARGET_EBADR,
625 [EXFULL] = TARGET_EXFULL,
626 [ENOANO] = TARGET_ENOANO,
627 [EBADRQC] = TARGET_EBADRQC,
628 [EBADSLT] = TARGET_EBADSLT,
629 [EBFONT] = TARGET_EBFONT,
630 [ENOSTR] = TARGET_ENOSTR,
631 [ENODATA] = TARGET_ENODATA,
632 [ETIME] = TARGET_ETIME,
633 [ENOSR] = TARGET_ENOSR,
634 [ENONET] = TARGET_ENONET,
635 [ENOPKG] = TARGET_ENOPKG,
636 [EREMOTE] = TARGET_EREMOTE,
637 [ENOLINK] = TARGET_ENOLINK,
638 [EADV] = TARGET_EADV,
639 [ESRMNT] = TARGET_ESRMNT,
640 [ECOMM] = TARGET_ECOMM,
641 [EPROTO] = TARGET_EPROTO,
642 [EDOTDOT] = TARGET_EDOTDOT,
643 [EMULTIHOP] = TARGET_EMULTIHOP,
644 [EBADMSG] = TARGET_EBADMSG,
645 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
646 [EOVERFLOW] = TARGET_EOVERFLOW,
647 [ENOTUNIQ] = TARGET_ENOTUNIQ,
648 [EBADFD] = TARGET_EBADFD,
649 [EREMCHG] = TARGET_EREMCHG,
650 [ELIBACC] = TARGET_ELIBACC,
651 [ELIBBAD] = TARGET_ELIBBAD,
652 [ELIBSCN] = TARGET_ELIBSCN,
653 [ELIBMAX] = TARGET_ELIBMAX,
654 [ELIBEXEC] = TARGET_ELIBEXEC,
655 [EILSEQ] = TARGET_EILSEQ,
656 [ENOSYS] = TARGET_ENOSYS,
657 [ELOOP] = TARGET_ELOOP,
658 [ERESTART] = TARGET_ERESTART,
659 [ESTRPIPE] = TARGET_ESTRPIPE,
660 [ENOTEMPTY] = TARGET_ENOTEMPTY,
661 [EUSERS] = TARGET_EUSERS,
662 [ENOTSOCK] = TARGET_ENOTSOCK,
663 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
664 [EMSGSIZE] = TARGET_EMSGSIZE,
665 [EPROTOTYPE] = TARGET_EPROTOTYPE,
666 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
667 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
668 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
669 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
670 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
671 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
672 [EADDRINUSE] = TARGET_EADDRINUSE,
673 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
674 [ENETDOWN] = TARGET_ENETDOWN,
675 [ENETUNREACH] = TARGET_ENETUNREACH,
676 [ENETRESET] = TARGET_ENETRESET,
677 [ECONNABORTED] = TARGET_ECONNABORTED,
678 [ECONNRESET] = TARGET_ECONNRESET,
679 [ENOBUFS] = TARGET_ENOBUFS,
680 [EISCONN] = TARGET_EISCONN,
681 [ENOTCONN] = TARGET_ENOTCONN,
682 [EUCLEAN] = TARGET_EUCLEAN,
683 [ENOTNAM] = TARGET_ENOTNAM,
684 [ENAVAIL] = TARGET_ENAVAIL,
685 [EISNAM] = TARGET_EISNAM,
686 [EREMOTEIO] = TARGET_EREMOTEIO,
687 [ESHUTDOWN] = TARGET_ESHUTDOWN,
688 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
689 [ETIMEDOUT] = TARGET_ETIMEDOUT,
690 [ECONNREFUSED] = TARGET_ECONNREFUSED,
691 [EHOSTDOWN] = TARGET_EHOSTDOWN,
692 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
693 [EALREADY] = TARGET_EALREADY,
694 [EINPROGRESS] = TARGET_EINPROGRESS,
695 [ESTALE] = TARGET_ESTALE,
696 [ECANCELED] = TARGET_ECANCELED,
697 [ENOMEDIUM] = TARGET_ENOMEDIUM,
698 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
699 #ifdef ENOKEY
700 [ENOKEY] = TARGET_ENOKEY,
701 #endif
702 #ifdef EKEYEXPIRED
703 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
704 #endif
705 #ifdef EKEYREVOKED
706 [EKEYREVOKED] = TARGET_EKEYREVOKED,
707 #endif
708 #ifdef EKEYREJECTED
709 [EKEYREJECTED] = TARGET_EKEYREJECTED,
710 #endif
711 #ifdef EOWNERDEAD
712 [EOWNERDEAD] = TARGET_EOWNERDEAD,
713 #endif
714 #ifdef ENOTRECOVERABLE
715 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
716 #endif
717 };
718
719 static inline int host_to_target_errno(int err)
720 {
721 if(host_to_target_errno_table[err])
722 return host_to_target_errno_table[err];
723 return err;
724 }
725
726 static inline int target_to_host_errno(int err)
727 {
728 if (target_to_host_errno_table[err])
729 return target_to_host_errno_table[err];
730 return err;
731 }
732
733 static inline abi_long get_errno(abi_long ret)
734 {
735 if (ret == -1)
736 return -host_to_target_errno(errno);
737 else
738 return ret;
739 }
740
741 static inline int is_error(abi_long ret)
742 {
743 return (abi_ulong)ret >= (abi_ulong)(-4096);
744 }
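
/*
 * Typical usage of the two helpers above: wrap every host syscall in
 * get_errno() so a failure becomes a negative *target* errno, then test
 * the result with is_error() before copying anything back to the guest:
 *
 *     ret = get_errno(fsync(fd));
 *     if (is_error(ret)) {
 *         return ret;          // already a -TARGET_Exxx value
 *     }
 *
 * is_error() treats the last 4096 negative values as errnos, matching the
 * kernel's convention for syscall return values.
 */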
745
746 char *target_strerror(int err)
747 {
748 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
749 return NULL;
750 }
751 return strerror(target_to_host_errno(err));
752 }
753
754 static abi_ulong target_brk;
755 static abi_ulong target_original_brk;
756 static abi_ulong brk_page;
757
758 void target_set_brk(abi_ulong new_brk)
759 {
760 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
761 brk_page = HOST_PAGE_ALIGN(target_brk);
762 }
763
764 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
765 #define DEBUGF_BRK(message, args...)
766
767 /* do_brk() must return target values and target errnos. */
768 abi_long do_brk(abi_ulong new_brk)
769 {
770 abi_long mapped_addr;
771 int new_alloc_size;
772
773 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
774
775 if (!new_brk) {
776 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
777 return target_brk;
778 }
779 if (new_brk < target_original_brk) {
780 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
781 target_brk);
782 return target_brk;
783 }
784
785 /* If the new brk is less than the highest page reserved to the
786 * target heap allocation, set it and we're almost done... */
787 if (new_brk <= brk_page) {
788 /* Heap contents are initialized to zero, as for anonymous
789 * mapped pages. */
790 if (new_brk > target_brk) {
791 memset(g2h(target_brk), 0, new_brk - target_brk);
792 }
793 target_brk = new_brk;
794 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
795 return target_brk;
796 }
797
798 /* We need to allocate more memory after the brk... Note that
799 * we don't use MAP_FIXED because that will map over the top of
800 * any existing mapping (like the one with the host libc or qemu
801 * itself); instead we treat "mapped but at wrong address" as
802 * a failure and unmap again.
803 */
804 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
805 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
806 PROT_READ|PROT_WRITE,
807 MAP_ANON|MAP_PRIVATE, 0, 0));
808
809 if (mapped_addr == brk_page) {
810 /* Heap contents are initialized to zero, as for anonymous
811 * mapped pages. Technically the new pages are already
812 * initialized to zero since they *are* anonymous mapped
813 * pages, however we have to take care with the contents that
814  * come from the remaining part of the previous page: it may
815  * contain garbage data due to previous heap usage (grown
816  * then shrunk). */
817 memset(g2h(target_brk), 0, brk_page - target_brk);
818
819 target_brk = new_brk;
820 brk_page = HOST_PAGE_ALIGN(target_brk);
821 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
822 target_brk);
823 return target_brk;
824 } else if (mapped_addr != -1) {
825 /* Mapped but at wrong address, meaning there wasn't actually
826 * enough space for this brk.
827 */
828 target_munmap(mapped_addr, new_alloc_size);
829 mapped_addr = -1;
830 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
831 }
832 else {
833 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
834 }
835
836 #if defined(TARGET_ALPHA)
837 /* We (partially) emulate OSF/1 on Alpha, which requires we
838 return a proper errno, not an unchanged brk value. */
839 return -TARGET_ENOMEM;
840 #endif
841 /* For everything else, return the previous break. */
842 return target_brk;
843 }
844
845 static inline abi_long copy_from_user_fdset(fd_set *fds,
846 abi_ulong target_fds_addr,
847 int n)
848 {
849 int i, nw, j, k;
850 abi_ulong b, *target_fds;
851
852 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
853 if (!(target_fds = lock_user(VERIFY_READ,
854 target_fds_addr,
855 sizeof(abi_ulong) * nw,
856 1)))
857 return -TARGET_EFAULT;
858
859 FD_ZERO(fds);
860 k = 0;
861 for (i = 0; i < nw; i++) {
862 /* grab the abi_ulong */
863 __get_user(b, &target_fds[i]);
864 for (j = 0; j < TARGET_ABI_BITS; j++) {
865 /* check the bit inside the abi_ulong */
866 if ((b >> j) & 1)
867 FD_SET(k, fds);
868 k++;
869 }
870 }
871
872 unlock_user(target_fds, target_fds_addr, 0);
873
874 return 0;
875 }
876
877 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
878 abi_ulong target_fds_addr,
879 int n)
880 {
881 if (target_fds_addr) {
882 if (copy_from_user_fdset(fds, target_fds_addr, n))
883 return -TARGET_EFAULT;
884 *fds_ptr = fds;
885 } else {
886 *fds_ptr = NULL;
887 }
888 return 0;
889 }
890
891 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
892 const fd_set *fds,
893 int n)
894 {
895 int i, nw, j, k;
896 abi_long v;
897 abi_ulong *target_fds;
898
899 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
900 if (!(target_fds = lock_user(VERIFY_WRITE,
901 target_fds_addr,
902 sizeof(abi_ulong) * nw,
903 0)))
904 return -TARGET_EFAULT;
905
906 k = 0;
907 for (i = 0; i < nw; i++) {
908 v = 0;
909 for (j = 0; j < TARGET_ABI_BITS; j++) {
910 v |= ((FD_ISSET(k, fds) != 0) << j);
911 k++;
912 }
913 __put_user(v, &target_fds[i]);
914 }
915
916 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
917
918 return 0;
919 }
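
/*
 * Worked example of the repacking above: with TARGET_ABI_BITS == 32 and
 * n == 70, nw = (70 + 31) / 32 = 3 guest words are locked.  Descriptor 33
 * lives in bit 1 of guest word 1; after __get_user() converts that word to
 * host byte order, the inner loop re-sets the same descriptor in the host
 * fd_set via FD_SET(33, fds).  copy_to_user_fdset() packs FD_ISSET()
 * results back into guest words in exactly the same layout.
 */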
920
921 #if defined(__alpha__)
922 #define HOST_HZ 1024
923 #else
924 #define HOST_HZ 100
925 #endif
926
927 static inline abi_long host_to_target_clock_t(long ticks)
928 {
929 #if HOST_HZ == TARGET_HZ
930 return ticks;
931 #else
932 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
933 #endif
934 }
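
/*
 * Example of the scaling above: an Alpha host (HOST_HZ == 1024) reporting
 * 2048 ticks to a 100 Hz target yields (2048 * 100) / 1024 = 200 target
 * ticks, i.e. the same two seconds of CPU time expressed in the target's
 * clock granularity.
 */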
935
936 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
937 const struct rusage *rusage)
938 {
939 struct target_rusage *target_rusage;
940
941 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
942 return -TARGET_EFAULT;
943 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
944 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
945 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
946 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
947 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
948 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
949 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
950 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
951 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
952 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
953 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
954 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
955 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
956 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
957 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
958 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
959 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
960 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
961 unlock_user_struct(target_rusage, target_addr, 1);
962
963 return 0;
964 }
965
966 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
967 {
968 abi_ulong target_rlim_swap;
969 rlim_t result;
970
971 target_rlim_swap = tswapal(target_rlim);
972 if (target_rlim_swap == TARGET_RLIM_INFINITY)
973 return RLIM_INFINITY;
974
975 result = target_rlim_swap;
976 if (target_rlim_swap != (rlim_t)result)
977 return RLIM_INFINITY;
978
979 return result;
980 }
981
982 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
983 {
984 abi_ulong target_rlim_swap;
985 abi_ulong result;
986
987 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
988 target_rlim_swap = TARGET_RLIM_INFINITY;
989 else
990 target_rlim_swap = rlim;
991 result = tswapal(target_rlim_swap);
992
993 return result;
994 }
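
/*
 * Sketch (names abridged) of how the rlimit conversions are used by the
 * getrlimit handler later in this file:
 *
 *     int resource = target_to_host_resource(arg1);
 *     struct rlimit rlim;
 *     ret = get_errno(getrlimit(resource, &rlim));
 *     if (!is_error(ret)) {
 *         target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
 *         target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
 *     }
 *
 * Note that a host limit too large for an abi_ulong is reported to the
 * guest as TARGET_RLIM_INFINITY rather than being silently truncated.
 */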
995
996 static inline int target_to_host_resource(int code)
997 {
998 switch (code) {
999 case TARGET_RLIMIT_AS:
1000 return RLIMIT_AS;
1001 case TARGET_RLIMIT_CORE:
1002 return RLIMIT_CORE;
1003 case TARGET_RLIMIT_CPU:
1004 return RLIMIT_CPU;
1005 case TARGET_RLIMIT_DATA:
1006 return RLIMIT_DATA;
1007 case TARGET_RLIMIT_FSIZE:
1008 return RLIMIT_FSIZE;
1009 case TARGET_RLIMIT_LOCKS:
1010 return RLIMIT_LOCKS;
1011 case TARGET_RLIMIT_MEMLOCK:
1012 return RLIMIT_MEMLOCK;
1013 case TARGET_RLIMIT_MSGQUEUE:
1014 return RLIMIT_MSGQUEUE;
1015 case TARGET_RLIMIT_NICE:
1016 return RLIMIT_NICE;
1017 case TARGET_RLIMIT_NOFILE:
1018 return RLIMIT_NOFILE;
1019 case TARGET_RLIMIT_NPROC:
1020 return RLIMIT_NPROC;
1021 case TARGET_RLIMIT_RSS:
1022 return RLIMIT_RSS;
1023 case TARGET_RLIMIT_RTPRIO:
1024 return RLIMIT_RTPRIO;
1025 case TARGET_RLIMIT_SIGPENDING:
1026 return RLIMIT_SIGPENDING;
1027 case TARGET_RLIMIT_STACK:
1028 return RLIMIT_STACK;
1029 default:
1030 return code;
1031 }
1032 }
1033
1034 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1035 abi_ulong target_tv_addr)
1036 {
1037 struct target_timeval *target_tv;
1038
1039 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1040 return -TARGET_EFAULT;
1041
1042 __get_user(tv->tv_sec, &target_tv->tv_sec);
1043 __get_user(tv->tv_usec, &target_tv->tv_usec);
1044
1045 unlock_user_struct(target_tv, target_tv_addr, 0);
1046
1047 return 0;
1048 }
1049
1050 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1051 const struct timeval *tv)
1052 {
1053 struct target_timeval *target_tv;
1054
1055 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1056 return -TARGET_EFAULT;
1057
1058 __put_user(tv->tv_sec, &target_tv->tv_sec);
1059 __put_user(tv->tv_usec, &target_tv->tv_usec);
1060
1061 unlock_user_struct(target_tv, target_tv_addr, 1);
1062
1063 return 0;
1064 }
1065
1066 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1067 #include <mqueue.h>
1068
1069 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1070 abi_ulong target_mq_attr_addr)
1071 {
1072 struct target_mq_attr *target_mq_attr;
1073
1074 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1075 target_mq_attr_addr, 1))
1076 return -TARGET_EFAULT;
1077
1078 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1079 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1080 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1081 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1082
1083 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1084
1085 return 0;
1086 }
1087
1088 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1089 const struct mq_attr *attr)
1090 {
1091 struct target_mq_attr *target_mq_attr;
1092
1093 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1094 target_mq_attr_addr, 0))
1095 return -TARGET_EFAULT;
1096
1097 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1098 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1099 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1100 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1101
1102 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1103
1104 return 0;
1105 }
1106 #endif
1107
1108 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1109 /* do_select() must return target values and target errnos. */
1110 static abi_long do_select(int n,
1111 abi_ulong rfd_addr, abi_ulong wfd_addr,
1112 abi_ulong efd_addr, abi_ulong target_tv_addr)
1113 {
1114 fd_set rfds, wfds, efds;
1115 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1116 struct timeval tv, *tv_ptr;
1117 abi_long ret;
1118
1119 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1120 if (ret) {
1121 return ret;
1122 }
1123 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1124 if (ret) {
1125 return ret;
1126 }
1127 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1128 if (ret) {
1129 return ret;
1130 }
1131
1132 if (target_tv_addr) {
1133 if (copy_from_user_timeval(&tv, target_tv_addr))
1134 return -TARGET_EFAULT;
1135 tv_ptr = &tv;
1136 } else {
1137 tv_ptr = NULL;
1138 }
1139
1140 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1141
1142 if (!is_error(ret)) {
1143 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1144 return -TARGET_EFAULT;
1145 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1146 return -TARGET_EFAULT;
1147 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1148 return -TARGET_EFAULT;
1149
1150 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1151 return -TARGET_EFAULT;
1152 }
1153
1154 return ret;
1155 }
1156 #endif
1157
1158 static abi_long do_pipe2(int host_pipe[], int flags)
1159 {
1160 #ifdef CONFIG_PIPE2
1161 return pipe2(host_pipe, flags);
1162 #else
1163 return -ENOSYS;
1164 #endif
1165 }
1166
1167 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1168 int flags, int is_pipe2)
1169 {
1170 int host_pipe[2];
1171 abi_long ret;
1172 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1173
1174 if (is_error(ret))
1175 return get_errno(ret);
1176
1177 /* Several targets have special calling conventions for the original
1178 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1179 if (!is_pipe2) {
1180 #if defined(TARGET_ALPHA)
1181 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1182 return host_pipe[0];
1183 #elif defined(TARGET_MIPS)
1184 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1185 return host_pipe[0];
1186 #elif defined(TARGET_SH4)
1187 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1188 return host_pipe[0];
1189 #endif
1190 }
1191
1192 if (put_user_s32(host_pipe[0], pipedes)
1193 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1194 return -TARGET_EFAULT;
1195 return get_errno(ret);
1196 }
1197
1198 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1199 abi_ulong target_addr,
1200 socklen_t len)
1201 {
1202 struct target_ip_mreqn *target_smreqn;
1203
1204 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1205 if (!target_smreqn)
1206 return -TARGET_EFAULT;
1207 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1208 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1209 if (len == sizeof(struct target_ip_mreqn))
1210 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1211 unlock_user(target_smreqn, target_addr, 0);
1212
1213 return 0;
1214 }
1215
1216 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1217 abi_ulong target_addr,
1218 socklen_t len)
1219 {
1220 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1221 sa_family_t sa_family;
1222 struct target_sockaddr *target_saddr;
1223
1224 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1225 if (!target_saddr)
1226 return -TARGET_EFAULT;
1227
1228 sa_family = tswap16(target_saddr->sa_family);
1229
1230     /* Oops. The caller might send an incomplete sun_path; sun_path
1231 * must be terminated by \0 (see the manual page), but
1232 * unfortunately it is quite common to specify sockaddr_un
1233 * length as "strlen(x->sun_path)" while it should be
1234 * "strlen(...) + 1". We'll fix that here if needed.
1235 * Linux kernel has a similar feature.
1236 */
1237
1238 if (sa_family == AF_UNIX) {
1239 if (len < unix_maxlen && len > 0) {
1240 char *cp = (char*)target_saddr;
1241
1242 if ( cp[len-1] && !cp[len] )
1243 len++;
1244 }
1245 if (len > unix_maxlen)
1246 len = unix_maxlen;
1247 }
1248
1249 memcpy(addr, target_saddr, len);
1250 addr->sa_family = sa_family;
1251 unlock_user(target_saddr, target_addr, 0);
1252
1253 return 0;
1254 }
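
/*
 * Worked example of the AF_UNIX fixup above: a guest binding to
 * "/tmp/sock" but passing addrlen = offsetof(struct sockaddr_un, sun_path)
 * + strlen("/tmp/sock") has omitted the terminating NUL.  In that case
 * cp[len-1] is the trailing 'k', and if the byte just past the stated
 * length is 0 (as it is for a zero-initialized sockaddr_un), len is bumped
 * by one before the (possibly clamped) length is handed to the host
 * syscall.
 */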
1255
1256 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1257 struct sockaddr *addr,
1258 socklen_t len)
1259 {
1260 struct target_sockaddr *target_saddr;
1261
1262 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1263 if (!target_saddr)
1264 return -TARGET_EFAULT;
1265 memcpy(target_saddr, addr, len);
1266 target_saddr->sa_family = tswap16(addr->sa_family);
1267 unlock_user(target_saddr, target_addr, len);
1268
1269 return 0;
1270 }
1271
1272 /* ??? Should this also swap msgh->name? */
1273 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1274 struct target_msghdr *target_msgh)
1275 {
1276 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1277 abi_long msg_controllen;
1278 abi_ulong target_cmsg_addr;
1279 struct target_cmsghdr *target_cmsg;
1280 socklen_t space = 0;
1281
1282 msg_controllen = tswapal(target_msgh->msg_controllen);
1283 if (msg_controllen < sizeof (struct target_cmsghdr))
1284 goto the_end;
1285 target_cmsg_addr = tswapal(target_msgh->msg_control);
1286 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1287 if (!target_cmsg)
1288 return -TARGET_EFAULT;
1289
1290 while (cmsg && target_cmsg) {
1291 void *data = CMSG_DATA(cmsg);
1292 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1293
1294 int len = tswapal(target_cmsg->cmsg_len)
1295 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1296
1297 space += CMSG_SPACE(len);
1298 if (space > msgh->msg_controllen) {
1299 space -= CMSG_SPACE(len);
1300 gemu_log("Host cmsg overflow\n");
1301 break;
1302 }
1303
1304 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1305 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1306 cmsg->cmsg_len = CMSG_LEN(len);
1307
1308 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1309 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1310 memcpy(data, target_data, len);
1311 } else {
1312 int *fd = (int *)data;
1313 int *target_fd = (int *)target_data;
1314 int i, numfds = len / sizeof(int);
1315
1316 for (i = 0; i < numfds; i++)
1317 fd[i] = tswap32(target_fd[i]);
1318 }
1319
1320 cmsg = CMSG_NXTHDR(msgh, cmsg);
1321 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1322 }
1323 unlock_user(target_cmsg, target_cmsg_addr, 0);
1324 the_end:
1325 msgh->msg_controllen = space;
1326 return 0;
1327 }
1328
1329 /* ??? Should this also swap msgh->name? */
1330 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1331 struct msghdr *msgh)
1332 {
1333 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1334 abi_long msg_controllen;
1335 abi_ulong target_cmsg_addr;
1336 struct target_cmsghdr *target_cmsg;
1337 socklen_t space = 0;
1338
1339 msg_controllen = tswapal(target_msgh->msg_controllen);
1340 if (msg_controllen < sizeof (struct target_cmsghdr))
1341 goto the_end;
1342 target_cmsg_addr = tswapal(target_msgh->msg_control);
1343 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1344 if (!target_cmsg)
1345 return -TARGET_EFAULT;
1346
1347 while (cmsg && target_cmsg) {
1348 void *data = CMSG_DATA(cmsg);
1349 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1350
1351 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1352
1353 space += TARGET_CMSG_SPACE(len);
1354 if (space > msg_controllen) {
1355 space -= TARGET_CMSG_SPACE(len);
1356 gemu_log("Target cmsg overflow\n");
1357 break;
1358 }
1359
1360 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1361 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1362 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1363
1364 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1365 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1366 memcpy(target_data, data, len);
1367 } else {
1368 int *fd = (int *)data;
1369 int *target_fd = (int *)target_data;
1370 int i, numfds = len / sizeof(int);
1371
1372 for (i = 0; i < numfds; i++)
1373 target_fd[i] = tswap32(fd[i]);
1374 }
1375
1376 cmsg = CMSG_NXTHDR(msgh, cmsg);
1377 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1378 }
1379 unlock_user(target_cmsg, target_cmsg_addr, space);
1380 the_end:
1381 target_msgh->msg_controllen = tswapal(space);
1382 return 0;
1383 }
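
/*
 * Example of what the two converters above handle: a big-endian guest on a
 * little-endian host passing descriptors {4, 7} via SCM_RIGHTS sends the
 * cmsg header and an fd array in guest byte order; target_to_host_cmsg()
 * swaps cmsg_level/cmsg_type/cmsg_len and each fd with tswap32()/tswapal()
 * so the host sendmsg() sees an ordinary SCM_RIGHTS message, and
 * host_to_target_cmsg() performs the reverse after recvmsg().  Any other
 * ancillary data type is copied through unchanged, with an "Unsupported
 * ancillary data" warning.
 */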
1384
1385 /* do_setsockopt() Must return target values and target errnos. */
1386 static abi_long do_setsockopt(int sockfd, int level, int optname,
1387 abi_ulong optval_addr, socklen_t optlen)
1388 {
1389 abi_long ret;
1390 int val;
1391 struct ip_mreqn *ip_mreq;
1392 struct ip_mreq_source *ip_mreq_source;
1393
1394 switch(level) {
1395 case SOL_TCP:
1396 /* TCP options all take an 'int' value. */
1397 if (optlen < sizeof(uint32_t))
1398 return -TARGET_EINVAL;
1399
1400 if (get_user_u32(val, optval_addr))
1401 return -TARGET_EFAULT;
1402 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1403 break;
1404 case SOL_IP:
1405 switch(optname) {
1406 case IP_TOS:
1407 case IP_TTL:
1408 case IP_HDRINCL:
1409 case IP_ROUTER_ALERT:
1410 case IP_RECVOPTS:
1411 case IP_RETOPTS:
1412 case IP_PKTINFO:
1413 case IP_MTU_DISCOVER:
1414 case IP_RECVERR:
1415 case IP_RECVTOS:
1416 #ifdef IP_FREEBIND
1417 case IP_FREEBIND:
1418 #endif
1419 case IP_MULTICAST_TTL:
1420 case IP_MULTICAST_LOOP:
1421 val = 0;
1422 if (optlen >= sizeof(uint32_t)) {
1423 if (get_user_u32(val, optval_addr))
1424 return -TARGET_EFAULT;
1425 } else if (optlen >= 1) {
1426 if (get_user_u8(val, optval_addr))
1427 return -TARGET_EFAULT;
1428 }
1429 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1430 break;
1431 case IP_ADD_MEMBERSHIP:
1432 case IP_DROP_MEMBERSHIP:
1433 if (optlen < sizeof (struct target_ip_mreq) ||
1434 optlen > sizeof (struct target_ip_mreqn))
1435 return -TARGET_EINVAL;
1436
1437 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1438 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1439 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1440 break;
1441
1442 case IP_BLOCK_SOURCE:
1443 case IP_UNBLOCK_SOURCE:
1444 case IP_ADD_SOURCE_MEMBERSHIP:
1445 case IP_DROP_SOURCE_MEMBERSHIP:
1446 if (optlen != sizeof (struct target_ip_mreq_source))
1447 return -TARGET_EINVAL;
1448
1449 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1450 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1451 unlock_user (ip_mreq_source, optval_addr, 0);
1452 break;
1453
1454 default:
1455 goto unimplemented;
1456 }
1457 break;
1458 case TARGET_SOL_SOCKET:
1459 switch (optname) {
1460 /* Options with 'int' argument. */
1461 case TARGET_SO_DEBUG:
1462 optname = SO_DEBUG;
1463 break;
1464 case TARGET_SO_REUSEADDR:
1465 optname = SO_REUSEADDR;
1466 break;
1467 case TARGET_SO_TYPE:
1468 optname = SO_TYPE;
1469 break;
1470 case TARGET_SO_ERROR:
1471 optname = SO_ERROR;
1472 break;
1473 case TARGET_SO_DONTROUTE:
1474 optname = SO_DONTROUTE;
1475 break;
1476 case TARGET_SO_BROADCAST:
1477 optname = SO_BROADCAST;
1478 break;
1479 case TARGET_SO_SNDBUF:
1480 optname = SO_SNDBUF;
1481 break;
1482 case TARGET_SO_RCVBUF:
1483 optname = SO_RCVBUF;
1484 break;
1485 case TARGET_SO_KEEPALIVE:
1486 optname = SO_KEEPALIVE;
1487 break;
1488 case TARGET_SO_OOBINLINE:
1489 optname = SO_OOBINLINE;
1490 break;
1491 case TARGET_SO_NO_CHECK:
1492 optname = SO_NO_CHECK;
1493 break;
1494 case TARGET_SO_PRIORITY:
1495 optname = SO_PRIORITY;
1496 break;
1497 #ifdef SO_BSDCOMPAT
1498 case TARGET_SO_BSDCOMPAT:
1499 optname = SO_BSDCOMPAT;
1500 break;
1501 #endif
1502 case TARGET_SO_PASSCRED:
1503 optname = SO_PASSCRED;
1504 break;
1505 case TARGET_SO_TIMESTAMP:
1506 optname = SO_TIMESTAMP;
1507 break;
1508 case TARGET_SO_RCVLOWAT:
1509 optname = SO_RCVLOWAT;
1510 break;
1511 case TARGET_SO_RCVTIMEO:
1512 optname = SO_RCVTIMEO;
1513 break;
1514 case TARGET_SO_SNDTIMEO:
1515 optname = SO_SNDTIMEO;
1516 break;
1518 default:
1519 goto unimplemented;
1520 }
1521 if (optlen < sizeof(uint32_t))
1522 return -TARGET_EINVAL;
1523
1524 if (get_user_u32(val, optval_addr))
1525 return -TARGET_EFAULT;
1526 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1527 break;
1528 default:
1529 unimplemented:
1530 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1531 ret = -TARGET_ENOPROTOOPT;
1532 }
1533 return ret;
1534 }
1535
1536 /* do_getsockopt() Must return target values and target errnos. */
1537 static abi_long do_getsockopt(int sockfd, int level, int optname,
1538 abi_ulong optval_addr, abi_ulong optlen)
1539 {
1540 abi_long ret;
1541 int len, val;
1542 socklen_t lv;
1543
1544 switch(level) {
1545 case TARGET_SOL_SOCKET:
1546 level = SOL_SOCKET;
1547 switch (optname) {
1548 /* These don't just return a single integer */
1549 case TARGET_SO_LINGER:
1550 case TARGET_SO_RCVTIMEO:
1551 case TARGET_SO_SNDTIMEO:
1552 case TARGET_SO_PEERNAME:
1553 goto unimplemented;
1554 case TARGET_SO_PEERCRED: {
1555 struct ucred cr;
1556 socklen_t crlen;
1557 struct target_ucred *tcr;
1558
1559 if (get_user_u32(len, optlen)) {
1560 return -TARGET_EFAULT;
1561 }
1562 if (len < 0) {
1563 return -TARGET_EINVAL;
1564 }
1565
1566 crlen = sizeof(cr);
1567 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1568 &cr, &crlen));
1569 if (ret < 0) {
1570 return ret;
1571 }
1572 if (len > crlen) {
1573 len = crlen;
1574 }
1575 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1576 return -TARGET_EFAULT;
1577 }
1578 __put_user(cr.pid, &tcr->pid);
1579 __put_user(cr.uid, &tcr->uid);
1580 __put_user(cr.gid, &tcr->gid);
1581 unlock_user_struct(tcr, optval_addr, 1);
1582 if (put_user_u32(len, optlen)) {
1583 return -TARGET_EFAULT;
1584 }
1585 break;
1586 }
1587 /* Options with 'int' argument. */
1588 case TARGET_SO_DEBUG:
1589 optname = SO_DEBUG;
1590 goto int_case;
1591 case TARGET_SO_REUSEADDR:
1592 optname = SO_REUSEADDR;
1593 goto int_case;
1594 case TARGET_SO_TYPE:
1595 optname = SO_TYPE;
1596 goto int_case;
1597 case TARGET_SO_ERROR:
1598 optname = SO_ERROR;
1599 goto int_case;
1600 case TARGET_SO_DONTROUTE:
1601 optname = SO_DONTROUTE;
1602 goto int_case;
1603 case TARGET_SO_BROADCAST:
1604 optname = SO_BROADCAST;
1605 goto int_case;
1606 case TARGET_SO_SNDBUF:
1607 optname = SO_SNDBUF;
1608 goto int_case;
1609 case TARGET_SO_RCVBUF:
1610 optname = SO_RCVBUF;
1611 goto int_case;
1612 case TARGET_SO_KEEPALIVE:
1613 optname = SO_KEEPALIVE;
1614 goto int_case;
1615 case TARGET_SO_OOBINLINE:
1616 optname = SO_OOBINLINE;
1617 goto int_case;
1618 case TARGET_SO_NO_CHECK:
1619 optname = SO_NO_CHECK;
1620 goto int_case;
1621 case TARGET_SO_PRIORITY:
1622 optname = SO_PRIORITY;
1623 goto int_case;
1624 #ifdef SO_BSDCOMPAT
1625 case TARGET_SO_BSDCOMPAT:
1626 optname = SO_BSDCOMPAT;
1627 goto int_case;
1628 #endif
1629 case TARGET_SO_PASSCRED:
1630 optname = SO_PASSCRED;
1631 goto int_case;
1632 case TARGET_SO_TIMESTAMP:
1633 optname = SO_TIMESTAMP;
1634 goto int_case;
1635 case TARGET_SO_RCVLOWAT:
1636 optname = SO_RCVLOWAT;
1637 goto int_case;
1638 default:
1639 goto int_case;
1640 }
1641 break;
1642 case SOL_TCP:
1643 /* TCP options all take an 'int' value. */
1644 int_case:
1645 if (get_user_u32(len, optlen))
1646 return -TARGET_EFAULT;
1647 if (len < 0)
1648 return -TARGET_EINVAL;
1649 lv = sizeof(lv);
1650 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1651 if (ret < 0)
1652 return ret;
1653 if (len > lv)
1654 len = lv;
1655 if (len == 4) {
1656 if (put_user_u32(val, optval_addr))
1657 return -TARGET_EFAULT;
1658 } else {
1659 if (put_user_u8(val, optval_addr))
1660 return -TARGET_EFAULT;
1661 }
1662 if (put_user_u32(len, optlen))
1663 return -TARGET_EFAULT;
1664 break;
1665 case SOL_IP:
1666 switch(optname) {
1667 case IP_TOS:
1668 case IP_TTL:
1669 case IP_HDRINCL:
1670 case IP_ROUTER_ALERT:
1671 case IP_RECVOPTS:
1672 case IP_RETOPTS:
1673 case IP_PKTINFO:
1674 case IP_MTU_DISCOVER:
1675 case IP_RECVERR:
1676 case IP_RECVTOS:
1677 #ifdef IP_FREEBIND
1678 case IP_FREEBIND:
1679 #endif
1680 case IP_MULTICAST_TTL:
1681 case IP_MULTICAST_LOOP:
1682 if (get_user_u32(len, optlen))
1683 return -TARGET_EFAULT;
1684 if (len < 0)
1685 return -TARGET_EINVAL;
1686 lv = sizeof(lv);
1687 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1688 if (ret < 0)
1689 return ret;
1690 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1691 len = 1;
1692 if (put_user_u32(len, optlen)
1693 || put_user_u8(val, optval_addr))
1694 return -TARGET_EFAULT;
1695 } else {
1696 if (len > sizeof(int))
1697 len = sizeof(int);
1698 if (put_user_u32(len, optlen)
1699 || put_user_u32(val, optval_addr))
1700 return -TARGET_EFAULT;
1701 }
1702 break;
1703 default:
1704 ret = -TARGET_ENOPROTOOPT;
1705 break;
1706 }
1707 break;
1708 default:
1709 unimplemented:
1710 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1711 level, optname);
1712 ret = -TARGET_EOPNOTSUPP;
1713 break;
1714 }
1715 return ret;
1716 }
1717
1718 /* FIXME
1719 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1720 * other lock functions have a return code of 0 for failure.
1721 */
1722 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1723 int count, int copy)
1724 {
1725 struct target_iovec *target_vec;
1726 abi_ulong base;
1727 int i;
1728
1729 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1730 if (!target_vec)
1731 return -TARGET_EFAULT;
1732 for(i = 0;i < count; i++) {
1733 base = tswapal(target_vec[i].iov_base);
1734 vec[i].iov_len = tswapal(target_vec[i].iov_len);
1735 if (vec[i].iov_len != 0) {
1736 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1737 /* Don't check lock_user return value. We must call writev even
1738            if an element has an invalid base address. */
1739 } else {
1740 /* zero length pointer is ignored */
1741 vec[i].iov_base = NULL;
1742 }
1743 }
1744 unlock_user (target_vec, target_addr, 0);
1745 return 0;
1746 }
1747
1748 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1749 int count, int copy)
1750 {
1751 struct target_iovec *target_vec;
1752 abi_ulong base;
1753 int i;
1754
1755 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1756 if (!target_vec)
1757 return -TARGET_EFAULT;
1758 for(i = 0;i < count; i++) {
1759 if (target_vec[i].iov_base) {
1760 base = tswapal(target_vec[i].iov_base);
1761 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1762 }
1763 }
1764 unlock_user (target_vec, target_addr, 0);
1765
1766 return 0;
1767 }
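
/*
 * Usage sketch (variable names illustrative), as in the readv/writev and
 * sendmsg/recvmsg paths further down: lock the guest iovec array plus
 * every referenced buffer, issue the host vector syscall, then unlock:
 *
 *     vec = alloca(count * sizeof(struct iovec));
 *     if (lock_iovec(VERIFY_READ, vec, target_vec_addr, count, 1) == 0) {
 *         ret = get_errno(writev(fd, vec, count));
 *         unlock_iovec(vec, target_vec_addr, count, 0);
 *     }
 *
 * Note the inverted convention flagged in the FIXME above: for these two
 * helpers 0 means success, unlike the other lock_*() functions.
 */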
1768
1769 /* do_socket() Must return target values and target errnos. */
1770 static abi_long do_socket(int domain, int type, int protocol)
1771 {
1772 #if defined(TARGET_MIPS)
1773 switch(type) {
1774 case TARGET_SOCK_DGRAM:
1775 type = SOCK_DGRAM;
1776 break;
1777 case TARGET_SOCK_STREAM:
1778 type = SOCK_STREAM;
1779 break;
1780 case TARGET_SOCK_RAW:
1781 type = SOCK_RAW;
1782 break;
1783 case TARGET_SOCK_RDM:
1784 type = SOCK_RDM;
1785 break;
1786 case TARGET_SOCK_SEQPACKET:
1787 type = SOCK_SEQPACKET;
1788 break;
1789 case TARGET_SOCK_PACKET:
1790 type = SOCK_PACKET;
1791 break;
1792 }
1793 #endif
1794 if (domain == PF_NETLINK)
1795         return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1796 return get_errno(socket(domain, type, protocol));
1797 }
1798
1799 /* do_bind() Must return target values and target errnos. */
1800 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1801 socklen_t addrlen)
1802 {
1803 void *addr;
1804 abi_long ret;
1805
1806 if ((int)addrlen < 0) {
1807 return -TARGET_EINVAL;
1808 }
1809
1810 addr = alloca(addrlen+1);
1811
1812 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1813 if (ret)
1814 return ret;
1815
1816 return get_errno(bind(sockfd, addr, addrlen));
1817 }
1818
1819 /* do_connect() Must return target values and target errnos. */
1820 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1821 socklen_t addrlen)
1822 {
1823 void *addr;
1824 abi_long ret;
1825
1826 if ((int)addrlen < 0) {
1827 return -TARGET_EINVAL;
1828 }
1829
1830 addr = alloca(addrlen);
1831
1832 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1833 if (ret)
1834 return ret;
1835
1836 return get_errno(connect(sockfd, addr, addrlen));
1837 }
1838
1839 /* do_sendrecvmsg() Must return target values and target errnos. */
1840 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1841 int flags, int send)
1842 {
1843 abi_long ret, len;
1844 struct target_msghdr *msgp;
1845 struct msghdr msg;
1846 int count;
1847 struct iovec *vec;
1848 abi_ulong target_vec;
1849
1850 /* FIXME */
1851 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1852 msgp,
1853 target_msg,
1854 send ? 1 : 0))
1855 return -TARGET_EFAULT;
1856 if (msgp->msg_name) {
1857 msg.msg_namelen = tswap32(msgp->msg_namelen);
1858 msg.msg_name = alloca(msg.msg_namelen);
1859 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1860 msg.msg_namelen);
1861 if (ret) {
1862 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1863 return ret;
1864 }
1865 } else {
1866 msg.msg_name = NULL;
1867 msg.msg_namelen = 0;
1868 }
1869 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1870 msg.msg_control = alloca(msg.msg_controllen);
1871 msg.msg_flags = tswap32(msgp->msg_flags);
1872
1873 count = tswapal(msgp->msg_iovlen);
1874 vec = alloca(count * sizeof(struct iovec));
1875 target_vec = tswapal(msgp->msg_iov);
1876 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1877 msg.msg_iovlen = count;
1878 msg.msg_iov = vec;
1879
1880 if (send) {
1881 ret = target_to_host_cmsg(&msg, msgp);
1882 if (ret == 0)
1883 ret = get_errno(sendmsg(fd, &msg, flags));
1884 } else {
1885 ret = get_errno(recvmsg(fd, &msg, flags));
1886 if (!is_error(ret)) {
1887 len = ret;
1888 ret = host_to_target_cmsg(msgp, &msg);
1889 if (!is_error(ret))
1890 ret = len;
1891 }
1892 }
1893 unlock_iovec(vec, target_vec, count, !send);
1894 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1895 return ret;
1896 }
1897
1898 /* do_accept() Must return target values and target errnos. */
1899 static abi_long do_accept(int fd, abi_ulong target_addr,
1900 abi_ulong target_addrlen_addr)
1901 {
1902 socklen_t addrlen;
1903 void *addr;
1904 abi_long ret;
1905
1906 if (target_addr == 0)
1907 return get_errno(accept(fd, NULL, NULL));
1908
1909 /* linux returns EINVAL if addrlen pointer is invalid */
1910 if (get_user_u32(addrlen, target_addrlen_addr))
1911 return -TARGET_EINVAL;
1912
1913 if ((int)addrlen < 0) {
1914 return -TARGET_EINVAL;
1915 }
1916
1917 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1918 return -TARGET_EINVAL;
1919
1920 addr = alloca(addrlen);
1921
1922 ret = get_errno(accept(fd, addr, &addrlen));
1923 if (!is_error(ret)) {
1924 host_to_target_sockaddr(target_addr, addr, addrlen);
1925 if (put_user_u32(addrlen, target_addrlen_addr))
1926 ret = -TARGET_EFAULT;
1927 }
1928 return ret;
1929 }
1930
1931 /* do_getpeername() Must return target values and target errnos. */
1932 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1933 abi_ulong target_addrlen_addr)
1934 {
1935 socklen_t addrlen;
1936 void *addr;
1937 abi_long ret;
1938
1939 if (get_user_u32(addrlen, target_addrlen_addr))
1940 return -TARGET_EFAULT;
1941
1942 if ((int)addrlen < 0) {
1943 return -TARGET_EINVAL;
1944 }
1945
1946 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1947 return -TARGET_EFAULT;
1948
1949 addr = alloca(addrlen);
1950
1951 ret = get_errno(getpeername(fd, addr, &addrlen));
1952 if (!is_error(ret)) {
1953 host_to_target_sockaddr(target_addr, addr, addrlen);
1954 if (put_user_u32(addrlen, target_addrlen_addr))
1955 ret = -TARGET_EFAULT;
1956 }
1957 return ret;
1958 }
1959
1960 /* do_getsockname() Must return target values and target errnos. */
1961 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1962 abi_ulong target_addrlen_addr)
1963 {
1964 socklen_t addrlen;
1965 void *addr;
1966 abi_long ret;
1967
1968 if (get_user_u32(addrlen, target_addrlen_addr))
1969 return -TARGET_EFAULT;
1970
1971 if ((int)addrlen < 0) {
1972 return -TARGET_EINVAL;
1973 }
1974
1975 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1976 return -TARGET_EFAULT;
1977
1978 addr = alloca(addrlen);
1979
1980 ret = get_errno(getsockname(fd, addr, &addrlen));
1981 if (!is_error(ret)) {
1982 host_to_target_sockaddr(target_addr, addr, addrlen);
1983 if (put_user_u32(addrlen, target_addrlen_addr))
1984 ret = -TARGET_EFAULT;
1985 }
1986 return ret;
1987 }
1988
1989 /* do_socketpair() Must return target values and target errnos. */
1990 static abi_long do_socketpair(int domain, int type, int protocol,
1991 abi_ulong target_tab_addr)
1992 {
1993 int tab[2];
1994 abi_long ret;
1995
1996 ret = get_errno(socketpair(domain, type, protocol, tab));
1997 if (!is_error(ret)) {
1998 if (put_user_s32(tab[0], target_tab_addr)
1999 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2000 ret = -TARGET_EFAULT;
2001 }
2002 return ret;
2003 }
2004
2005 /* do_sendto() Must return target values and target errnos. */
2006 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2007 abi_ulong target_addr, socklen_t addrlen)
2008 {
2009 void *addr;
2010 void *host_msg;
2011 abi_long ret;
2012
2013 if ((int)addrlen < 0) {
2014 return -TARGET_EINVAL;
2015 }
2016
2017 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2018 if (!host_msg)
2019 return -TARGET_EFAULT;
2020 if (target_addr) {
2021 addr = alloca(addrlen);
2022 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2023 if (ret) {
2024 unlock_user(host_msg, msg, 0);
2025 return ret;
2026 }
2027 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2028 } else {
2029 ret = get_errno(send(fd, host_msg, len, flags));
2030 }
2031 unlock_user(host_msg, msg, 0);
2032 return ret;
2033 }
2034
2035 /* do_recvfrom() Must return target values and target errnos. */
2036 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2037 abi_ulong target_addr,
2038 abi_ulong target_addrlen)
2039 {
2040 socklen_t addrlen;
2041 void *addr;
2042 void *host_msg;
2043 abi_long ret;
2044
2045 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2046 if (!host_msg)
2047 return -TARGET_EFAULT;
2048 if (target_addr) {
2049 if (get_user_u32(addrlen, target_addrlen)) {
2050 ret = -TARGET_EFAULT;
2051 goto fail;
2052 }
2053 if ((int)addrlen < 0) {
2054 ret = -TARGET_EINVAL;
2055 goto fail;
2056 }
2057 addr = alloca(addrlen);
2058 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2059 } else {
2060 addr = NULL; /* To keep compiler quiet. */
2061 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2062 }
2063 if (!is_error(ret)) {
2064 if (target_addr) {
2065 host_to_target_sockaddr(target_addr, addr, addrlen);
2066 if (put_user_u32(addrlen, target_addrlen)) {
2067 ret = -TARGET_EFAULT;
2068 goto fail;
2069 }
2070 }
2071 unlock_user(host_msg, msg, len);
2072 } else {
2073 fail:
2074 unlock_user(host_msg, msg, 0);
2075 }
2076 return ret;
2077 }
2078
2079 #ifdef TARGET_NR_socketcall
2080 /* do_socketcall() Must return target values and target errnos. */
2081 static abi_long do_socketcall(int num, abi_ulong vptr)
2082 {
2083 abi_long ret;
2084 const int n = sizeof(abi_ulong);
2085
2086 switch(num) {
2087 case SOCKOP_socket:
2088 {
2089 abi_ulong domain, type, protocol;
2090
2091 if (get_user_ual(domain, vptr)
2092 || get_user_ual(type, vptr + n)
2093 || get_user_ual(protocol, vptr + 2 * n))
2094 return -TARGET_EFAULT;
2095
2096 ret = do_socket(domain, type, protocol);
2097 }
2098 break;
2099 case SOCKOP_bind:
2100 {
2101 abi_ulong sockfd;
2102 abi_ulong target_addr;
2103 socklen_t addrlen;
2104
2105 if (get_user_ual(sockfd, vptr)
2106 || get_user_ual(target_addr, vptr + n)
2107 || get_user_ual(addrlen, vptr + 2 * n))
2108 return -TARGET_EFAULT;
2109
2110 ret = do_bind(sockfd, target_addr, addrlen);
2111 }
2112 break;
2113 case SOCKOP_connect:
2114 {
2115 abi_ulong sockfd;
2116 abi_ulong target_addr;
2117 socklen_t addrlen;
2118
2119 if (get_user_ual(sockfd, vptr)
2120 || get_user_ual(target_addr, vptr + n)
2121 || get_user_ual(addrlen, vptr + 2 * n))
2122 return -TARGET_EFAULT;
2123
2124 ret = do_connect(sockfd, target_addr, addrlen);
2125 }
2126 break;
2127 case SOCKOP_listen:
2128 {
2129 abi_ulong sockfd, backlog;
2130
2131 if (get_user_ual(sockfd, vptr)
2132 || get_user_ual(backlog, vptr + n))
2133 return -TARGET_EFAULT;
2134
2135 ret = get_errno(listen(sockfd, backlog));
2136 }
2137 break;
2138 case SOCKOP_accept:
2139 {
2140 abi_ulong sockfd;
2141 abi_ulong target_addr, target_addrlen;
2142
2143 if (get_user_ual(sockfd, vptr)
2144 || get_user_ual(target_addr, vptr + n)
2145 || get_user_ual(target_addrlen, vptr + 2 * n))
2146 return -TARGET_EFAULT;
2147
2148 ret = do_accept(sockfd, target_addr, target_addrlen);
2149 }
2150 break;
2151 case SOCKOP_getsockname:
2152 {
2153 abi_ulong sockfd;
2154 abi_ulong target_addr, target_addrlen;
2155
2156 if (get_user_ual(sockfd, vptr)
2157 || get_user_ual(target_addr, vptr + n)
2158 || get_user_ual(target_addrlen, vptr + 2 * n))
2159 return -TARGET_EFAULT;
2160
2161 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2162 }
2163 break;
2164 case SOCKOP_getpeername:
2165 {
2166 abi_ulong sockfd;
2167 abi_ulong target_addr, target_addrlen;
2168
2169 if (get_user_ual(sockfd, vptr)
2170 || get_user_ual(target_addr, vptr + n)
2171 || get_user_ual(target_addrlen, vptr + 2 * n))
2172 return -TARGET_EFAULT;
2173
2174 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2175 }
2176 break;
2177 case SOCKOP_socketpair:
2178 {
2179 abi_ulong domain, type, protocol;
2180 abi_ulong tab;
2181
2182 if (get_user_ual(domain, vptr)
2183 || get_user_ual(type, vptr + n)
2184 || get_user_ual(protocol, vptr + 2 * n)
2185 || get_user_ual(tab, vptr + 3 * n))
2186 return -TARGET_EFAULT;
2187
2188 ret = do_socketpair(domain, type, protocol, tab);
2189 }
2190 break;
2191 case SOCKOP_send:
2192 {
2193 abi_ulong sockfd;
2194 abi_ulong msg;
2195 size_t len;
2196 abi_ulong flags;
2197
2198 if (get_user_ual(sockfd, vptr)
2199 || get_user_ual(msg, vptr + n)
2200 || get_user_ual(len, vptr + 2 * n)
2201 || get_user_ual(flags, vptr + 3 * n))
2202 return -TARGET_EFAULT;
2203
2204 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2205 }
2206 break;
2207 case SOCKOP_recv:
2208 {
2209 abi_ulong sockfd;
2210 abi_ulong msg;
2211 size_t len;
2212 abi_ulong flags;
2213
2214 if (get_user_ual(sockfd, vptr)
2215 || get_user_ual(msg, vptr + n)
2216 || get_user_ual(len, vptr + 2 * n)
2217 || get_user_ual(flags, vptr + 3 * n))
2218 return -TARGET_EFAULT;
2219
2220 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2221 }
2222 break;
2223 case SOCKOP_sendto:
2224 {
2225 abi_ulong sockfd;
2226 abi_ulong msg;
2227 size_t len;
2228 abi_ulong flags;
2229 abi_ulong addr;
2230 socklen_t addrlen;
2231
2232 if (get_user_ual(sockfd, vptr)
2233 || get_user_ual(msg, vptr + n)
2234 || get_user_ual(len, vptr + 2 * n)
2235 || get_user_ual(flags, vptr + 3 * n)
2236 || get_user_ual(addr, vptr + 4 * n)
2237 || get_user_ual(addrlen, vptr + 5 * n))
2238 return -TARGET_EFAULT;
2239
2240 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2241 }
2242 break;
2243 case SOCKOP_recvfrom:
2244 {
2245 abi_ulong sockfd;
2246 abi_ulong msg;
2247 size_t len;
2248 abi_ulong flags;
2249 abi_ulong addr;
2250 socklen_t addrlen;
2251
2252 if (get_user_ual(sockfd, vptr)
2253 || get_user_ual(msg, vptr + n)
2254 || get_user_ual(len, vptr + 2 * n)
2255 || get_user_ual(flags, vptr + 3 * n)
2256 || get_user_ual(addr, vptr + 4 * n)
2257 || get_user_ual(addrlen, vptr + 5 * n))
2258 return -TARGET_EFAULT;
2259
2260 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2261 }
2262 break;
2263 case SOCKOP_shutdown:
2264 {
2265 abi_ulong sockfd, how;
2266
2267 if (get_user_ual(sockfd, vptr)
2268 || get_user_ual(how, vptr + n))
2269 return -TARGET_EFAULT;
2270
2271 ret = get_errno(shutdown(sockfd, how));
2272 }
2273 break;
2274 case SOCKOP_sendmsg:
2275 case SOCKOP_recvmsg:
2276 {
2277 abi_ulong fd;
2278 abi_ulong target_msg;
2279 abi_ulong flags;
2280
2281 if (get_user_ual(fd, vptr)
2282 || get_user_ual(target_msg, vptr + n)
2283 || get_user_ual(flags, vptr + 2 * n))
2284 return -TARGET_EFAULT;
2285
2286 ret = do_sendrecvmsg(fd, target_msg, flags,
2287 (num == SOCKOP_sendmsg));
2288 }
2289 break;
2290 case SOCKOP_setsockopt:
2291 {
2292 abi_ulong sockfd;
2293 abi_ulong level;
2294 abi_ulong optname;
2295 abi_ulong optval;
2296 socklen_t optlen;
2297
2298 if (get_user_ual(sockfd, vptr)
2299 || get_user_ual(level, vptr + n)
2300 || get_user_ual(optname, vptr + 2 * n)
2301 || get_user_ual(optval, vptr + 3 * n)
2302 || get_user_ual(optlen, vptr + 4 * n))
2303 return -TARGET_EFAULT;
2304
2305 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2306 }
2307 break;
2308 case SOCKOP_getsockopt:
2309 {
2310 abi_ulong sockfd;
2311 abi_ulong level;
2312 abi_ulong optname;
2313 abi_ulong optval;
2314 socklen_t optlen;
2315
2316 if (get_user_ual(sockfd, vptr)
2317 || get_user_ual(level, vptr + n)
2318 || get_user_ual(optname, vptr + 2 * n)
2319 || get_user_ual(optval, vptr + 3 * n)
2320 || get_user_ual(optlen, vptr + 4 * n))
2321 return -TARGET_EFAULT;
2322
2323 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2324 }
2325 break;
2326 default:
2327 gemu_log("Unsupported socketcall: %d\n", num);
2328 ret = -TARGET_ENOSYS;
2329 break;
2330 }
2331 return ret;
2332 }
2333 #endif
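
/* Illustrative sketch (not part of the original code): on 32-bit targets
 * whose C library funnels socket operations through sys_socketcall, the
 * individual arguments arrive packed as an array of longs in guest memory,
 * which is why each case above reads them with get_user_ual(x, vptr + i*n).
 * A guest-side socket() would look roughly like this (hypothetical, and
 * only meaningful where SYS_socketcall is actually defined):
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>

long guest_socket(int domain, int type, int protocol)
{
    long args[3] = { domain, type, protocol };
    /* 1 is SYS_SOCKET, matching SOCKOP_socket in the dispatcher above */
    return syscall(SYS_socketcall, 1, args);
}
#endif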
2334
2335 #define N_SHM_REGIONS 32
2336
2337 static struct shm_region {
2338 abi_ulong start;
2339 abi_ulong size;
2340 } shm_regions[N_SHM_REGIONS];
2341
2342 struct target_ipc_perm
2343 {
2344 abi_long __key;
2345 abi_ulong uid;
2346 abi_ulong gid;
2347 abi_ulong cuid;
2348 abi_ulong cgid;
2349 unsigned short int mode;
2350 unsigned short int __pad1;
2351 unsigned short int __seq;
2352 unsigned short int __pad2;
2353 abi_ulong __unused1;
2354 abi_ulong __unused2;
2355 };
2356
2357 struct target_semid_ds
2358 {
2359 struct target_ipc_perm sem_perm;
2360 abi_ulong sem_otime;
2361 abi_ulong __unused1;
2362 abi_ulong sem_ctime;
2363 abi_ulong __unused2;
2364 abi_ulong sem_nsems;
2365 abi_ulong __unused3;
2366 abi_ulong __unused4;
2367 };
2368
2369 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2370 abi_ulong target_addr)
2371 {
2372 struct target_ipc_perm *target_ip;
2373 struct target_semid_ds *target_sd;
2374
2375 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2376 return -TARGET_EFAULT;
2377 target_ip = &(target_sd->sem_perm);
2378 host_ip->__key = tswapal(target_ip->__key);
2379 host_ip->uid = tswapal(target_ip->uid);
2380 host_ip->gid = tswapal(target_ip->gid);
2381 host_ip->cuid = tswapal(target_ip->cuid);
2382 host_ip->cgid = tswapal(target_ip->cgid);
2383 host_ip->mode = tswap16(target_ip->mode);
2384 unlock_user_struct(target_sd, target_addr, 0);
2385 return 0;
2386 }
2387
2388 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2389 struct ipc_perm *host_ip)
2390 {
2391 struct target_ipc_perm *target_ip;
2392 struct target_semid_ds *target_sd;
2393
2394 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2395 return -TARGET_EFAULT;
2396 target_ip = &(target_sd->sem_perm);
2397 target_ip->__key = tswapal(host_ip->__key);
2398 target_ip->uid = tswapal(host_ip->uid);
2399 target_ip->gid = tswapal(host_ip->gid);
2400 target_ip->cuid = tswapal(host_ip->cuid);
2401 target_ip->cgid = tswapal(host_ip->cgid);
2402 target_ip->mode = tswap16(host_ip->mode);
2403 unlock_user_struct(target_sd, target_addr, 1);
2404 return 0;
2405 }
2406
2407 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2408 abi_ulong target_addr)
2409 {
2410 struct target_semid_ds *target_sd;
2411
2412 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2413 return -TARGET_EFAULT;
2414 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2415 return -TARGET_EFAULT;
2416 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2417 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2418 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2419 unlock_user_struct(target_sd, target_addr, 0);
2420 return 0;
2421 }
2422
2423 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2424 struct semid_ds *host_sd)
2425 {
2426 struct target_semid_ds *target_sd;
2427
2428 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2429 return -TARGET_EFAULT;
2430 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2431 return -TARGET_EFAULT;
2432 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2433 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2434 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2435 unlock_user_struct(target_sd, target_addr, 1);
2436 return 0;
2437 }
2438
2439 struct target_seminfo {
2440 int semmap;
2441 int semmni;
2442 int semmns;
2443 int semmnu;
2444 int semmsl;
2445 int semopm;
2446 int semume;
2447 int semusz;
2448 int semvmx;
2449 int semaem;
2450 };
2451
2452 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2453 struct seminfo *host_seminfo)
2454 {
2455 struct target_seminfo *target_seminfo;
2456 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2457 return -TARGET_EFAULT;
2458 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2459 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2460 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2461 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2462 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2463 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2464 __put_user(host_seminfo->semume, &target_seminfo->semume);
2465 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2466 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2467 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2468 unlock_user_struct(target_seminfo, target_addr, 1);
2469 return 0;
2470 }
2471
2472 union semun {
2473 int val;
2474 struct semid_ds *buf;
2475 unsigned short *array;
2476 struct seminfo *__buf;
2477 };
2478
2479 union target_semun {
2480 int val;
2481 abi_ulong buf;
2482 abi_ulong array;
2483 abi_ulong __buf;
2484 };
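
/* The host union holds real pointers that can be handed straight to
 * semctl(), while target_semun overlays the guest's view, whose pointer
 * members are guest addresses stored as abi_ulong.  The helpers above and
 * below convert between the two representations. */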
2485
2486 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2487 abi_ulong target_addr)
2488 {
2489 int nsems;
2490 unsigned short *array;
2491 union semun semun;
2492 struct semid_ds semid_ds;
2493 int i, ret;
2494
2495 semun.buf = &semid_ds;
2496
2497 ret = semctl(semid, 0, IPC_STAT, semun);
2498 if (ret == -1)
2499 return get_errno(ret);
2500
2501 nsems = semid_ds.sem_nsems;
2502
2503 *host_array = malloc(nsems*sizeof(unsigned short));
2504 array = lock_user(VERIFY_READ, target_addr,
2505 nsems*sizeof(unsigned short), 1);
2506 if (!array)
2507 return -TARGET_EFAULT;
2508
2509 for(i=0; i<nsems; i++) {
2510 __get_user((*host_array)[i], &array[i]);
2511 }
2512 unlock_user(array, target_addr, 0);
2513
2514 return 0;
2515 }
2516
2517 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2518 unsigned short **host_array)
2519 {
2520 int nsems;
2521 unsigned short *array;
2522 union semun semun;
2523 struct semid_ds semid_ds;
2524 int i, ret;
2525
2526 semun.buf = &semid_ds;
2527
2528 ret = semctl(semid, 0, IPC_STAT, semun);
2529 if (ret == -1)
2530 return get_errno(ret);
2531
2532 nsems = semid_ds.sem_nsems;
2533
2534 array = lock_user(VERIFY_WRITE, target_addr,
2535 nsems*sizeof(unsigned short), 0);
2536 if (!array)
2537 return -TARGET_EFAULT;
2538
2539 for(i=0; i<nsems; i++) {
2540 __put_user((*host_array)[i], &array[i]);
2541 }
2542 free(*host_array);
2543 unlock_user(array, target_addr, 1);
2544
2545 return 0;
2546 }
2547
2548 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2549 union target_semun target_su)
2550 {
2551 union semun arg;
2552 struct semid_ds dsarg;
2553 unsigned short *array = NULL;
2554 struct seminfo seminfo;
2555 abi_long ret = -TARGET_EINVAL;
2556 abi_long err;
2557 cmd &= 0xff;
2558
2559 switch( cmd ) {
2560 case GETVAL:
2561 case SETVAL:
2562 arg.val = tswap32(target_su.val);
2563 ret = get_errno(semctl(semid, semnum, cmd, arg));
2564 target_su.val = tswap32(arg.val);
2565 break;
2566 case GETALL:
2567 case SETALL:
2568 err = target_to_host_semarray(semid, &array, target_su.array);
2569 if (err)
2570 return err;
2571 arg.array = array;
2572 ret = get_errno(semctl(semid, semnum, cmd, arg));
2573 err = host_to_target_semarray(semid, target_su.array, &array);
2574 if (err)
2575 return err;
2576 break;
2577 case IPC_STAT:
2578 case IPC_SET:
2579 case SEM_STAT:
2580 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2581 if (err)
2582 return err;
2583 arg.buf = &dsarg;
2584 ret = get_errno(semctl(semid, semnum, cmd, arg));
2585 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2586 if (err)
2587 return err;
2588 break;
2589 case IPC_INFO:
2590 case SEM_INFO:
2591 arg.__buf = &seminfo;
2592 ret = get_errno(semctl(semid, semnum, cmd, arg));
2593 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2594 if (err)
2595 return err;
2596 break;
2597 case IPC_RMID:
2598 case GETPID:
2599 case GETNCNT:
2600 case GETZCNT:
2601 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2602 break;
2603 }
2604
2605 return ret;
2606 }
2607
2608 struct target_sembuf {
2609 unsigned short sem_num;
2610 short sem_op;
2611 short sem_flg;
2612 };
2613
2614 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2615 abi_ulong target_addr,
2616 unsigned nsops)
2617 {
2618 struct target_sembuf *target_sembuf;
2619 int i;
2620
2621 target_sembuf = lock_user(VERIFY_READ, target_addr,
2622 nsops*sizeof(struct target_sembuf), 1);
2623 if (!target_sembuf)
2624 return -TARGET_EFAULT;
2625
2626 for(i=0; i<nsops; i++) {
2627 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2628 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2629 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2630 }
2631
2632 unlock_user(target_sembuf, target_addr, 0);
2633
2634 return 0;
2635 }
2636
2637 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2638 {
2639 struct sembuf sops[nsops];
2640
2641 if (target_to_host_sembuf(sops, ptr, nsops))
2642 return -TARGET_EFAULT;
2643
2644 return get_errno(semop(semid, sops, nsops));
2645 }
2646
2647 struct target_msqid_ds
2648 {
2649 struct target_ipc_perm msg_perm;
2650 abi_ulong msg_stime;
2651 #if TARGET_ABI_BITS == 32
2652 abi_ulong __unused1;
2653 #endif
2654 abi_ulong msg_rtime;
2655 #if TARGET_ABI_BITS == 32
2656 abi_ulong __unused2;
2657 #endif
2658 abi_ulong msg_ctime;
2659 #if TARGET_ABI_BITS == 32
2660 abi_ulong __unused3;
2661 #endif
2662 abi_ulong __msg_cbytes;
2663 abi_ulong msg_qnum;
2664 abi_ulong msg_qbytes;
2665 abi_ulong msg_lspid;
2666 abi_ulong msg_lrpid;
2667 abi_ulong __unused4;
2668 abi_ulong __unused5;
2669 };
2670
2671 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2672 abi_ulong target_addr)
2673 {
2674 struct target_msqid_ds *target_md;
2675
2676 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2677 return -TARGET_EFAULT;
2678 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2679 return -TARGET_EFAULT;
2680 host_md->msg_stime = tswapal(target_md->msg_stime);
2681 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2682 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2683 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2684 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2685 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2686 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2687 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2688 unlock_user_struct(target_md, target_addr, 0);
2689 return 0;
2690 }
2691
2692 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2693 struct msqid_ds *host_md)
2694 {
2695 struct target_msqid_ds *target_md;
2696
2697 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2698 return -TARGET_EFAULT;
2699 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2700 return -TARGET_EFAULT;
2701 target_md->msg_stime = tswapal(host_md->msg_stime);
2702 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2703 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2704 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2705 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2706 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2707 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2708 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2709 unlock_user_struct(target_md, target_addr, 1);
2710 return 0;
2711 }
2712
2713 struct target_msginfo {
2714 int msgpool;
2715 int msgmap;
2716 int msgmax;
2717 int msgmnb;
2718 int msgmni;
2719 int msgssz;
2720 int msgtql;
2721 unsigned short int msgseg;
2722 };
2723
2724 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2725 struct msginfo *host_msginfo)
2726 {
2727 struct target_msginfo *target_msginfo;
2728 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2729 return -TARGET_EFAULT;
2730 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2731 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2732 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2733 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2734 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2735 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2736 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2737 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2738 unlock_user_struct(target_msginfo, target_addr, 1);
2739 return 0;
2740 }
2741
2742 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2743 {
2744 struct msqid_ds dsarg;
2745 struct msginfo msginfo;
2746 abi_long ret = -TARGET_EINVAL;
2747
2748 cmd &= 0xff;
2749
2750 switch (cmd) {
2751 case IPC_STAT:
2752 case IPC_SET:
2753 case MSG_STAT:
2754 if (target_to_host_msqid_ds(&dsarg,ptr))
2755 return -TARGET_EFAULT;
2756 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2757 if (host_to_target_msqid_ds(ptr,&dsarg))
2758 return -TARGET_EFAULT;
2759 break;
2760 case IPC_RMID:
2761 ret = get_errno(msgctl(msgid, cmd, NULL));
2762 break;
2763 case IPC_INFO:
2764 case MSG_INFO:
2765 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2766 if (host_to_target_msginfo(ptr, &msginfo))
2767 return -TARGET_EFAULT;
2768 break;
2769 }
2770
2771 return ret;
2772 }
2773
2774 struct target_msgbuf {
2775 abi_long mtype;
2776 char mtext[1];
2777 };
2778
2779 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2780 unsigned int msgsz, int msgflg)
2781 {
2782 struct target_msgbuf *target_mb;
2783 struct msgbuf *host_mb;
2784 abi_long ret = 0;
2785
2786 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2787 return -TARGET_EFAULT;
2788 host_mb = malloc(msgsz+sizeof(long));
2789 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2790 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2791 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2792 free(host_mb);
2793 unlock_user_struct(target_mb, msgp, 0);
2794
2795 return ret;
2796 }
2797
2798 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2799 unsigned int msgsz, abi_long msgtyp,
2800 int msgflg)
2801 {
2802 struct target_msgbuf *target_mb;
2803 char *target_mtext;
2804 struct msgbuf *host_mb;
2805 abi_long ret = 0;
2806
2807 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2808 return -TARGET_EFAULT;
2809
2810 host_mb = malloc(msgsz+sizeof(long));
2811 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
2812
2813 if (ret > 0) {
2814 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2815 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2816 if (!target_mtext) {
2817 ret = -TARGET_EFAULT;
2818 goto end;
2819 }
2820 memcpy(target_mb->mtext, host_mb->mtext, ret);
2821 unlock_user(target_mtext, target_mtext_addr, ret);
2822 }
2823
2824 target_mb->mtype = tswapal(host_mb->mtype);
2825 free(host_mb);
2826
2827 end:
2828 if (target_mb)
2829 unlock_user_struct(target_mb, msgp, 1);
2830 return ret;
2831 }
2832
2833 struct target_shmid_ds
2834 {
2835 struct target_ipc_perm shm_perm;
2836 abi_ulong shm_segsz;
2837 abi_ulong shm_atime;
2838 #if TARGET_ABI_BITS == 32
2839 abi_ulong __unused1;
2840 #endif
2841 abi_ulong shm_dtime;
2842 #if TARGET_ABI_BITS == 32
2843 abi_ulong __unused2;
2844 #endif
2845 abi_ulong shm_ctime;
2846 #if TARGET_ABI_BITS == 32
2847 abi_ulong __unused3;
2848 #endif
2849 int shm_cpid;
2850 int shm_lpid;
2851 abi_ulong shm_nattch;
2852 unsigned long int __unused4;
2853 unsigned long int __unused5;
2854 };
2855
2856 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2857 abi_ulong target_addr)
2858 {
2859 struct target_shmid_ds *target_sd;
2860
2861 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2862 return -TARGET_EFAULT;
2863 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2864 return -TARGET_EFAULT;
2865 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2866 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2867 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2868 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2869 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2870 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2871 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2872 unlock_user_struct(target_sd, target_addr, 0);
2873 return 0;
2874 }
2875
2876 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2877 struct shmid_ds *host_sd)
2878 {
2879 struct target_shmid_ds *target_sd;
2880
2881 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2882 return -TARGET_EFAULT;
2883 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2884 return -TARGET_EFAULT;
2885 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2886 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2887 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2888 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2889 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2890 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2891 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2892 unlock_user_struct(target_sd, target_addr, 1);
2893 return 0;
2894 }
2895
2896 struct target_shminfo {
2897 abi_ulong shmmax;
2898 abi_ulong shmmin;
2899 abi_ulong shmmni;
2900 abi_ulong shmseg;
2901 abi_ulong shmall;
2902 };
2903
2904 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2905 struct shminfo *host_shminfo)
2906 {
2907 struct target_shminfo *target_shminfo;
2908 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2909 return -TARGET_EFAULT;
2910 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2911 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2912 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2913 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2914 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2915 unlock_user_struct(target_shminfo, target_addr, 1);
2916 return 0;
2917 }
2918
2919 struct target_shm_info {
2920 int used_ids;
2921 abi_ulong shm_tot;
2922 abi_ulong shm_rss;
2923 abi_ulong shm_swp;
2924 abi_ulong swap_attempts;
2925 abi_ulong swap_successes;
2926 };
2927
2928 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2929 struct shm_info *host_shm_info)
2930 {
2931 struct target_shm_info *target_shm_info;
2932 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2933 return -TARGET_EFAULT;
2934 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2935 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2936 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2937 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2938 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2939 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2940 unlock_user_struct(target_shm_info, target_addr, 1);
2941 return 0;
2942 }
2943
2944 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2945 {
2946 struct shmid_ds dsarg;
2947 struct shminfo shminfo;
2948 struct shm_info shm_info;
2949 abi_long ret = -TARGET_EINVAL;
2950
2951 cmd &= 0xff;
2952
2953 switch(cmd) {
2954 case IPC_STAT:
2955 case IPC_SET:
2956 case SHM_STAT:
2957 if (target_to_host_shmid_ds(&dsarg, buf))
2958 return -TARGET_EFAULT;
2959 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2960 if (host_to_target_shmid_ds(buf, &dsarg))
2961 return -TARGET_EFAULT;
2962 break;
2963 case IPC_INFO:
2964 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2965 if (host_to_target_shminfo(buf, &shminfo))
2966 return -TARGET_EFAULT;
2967 break;
2968 case SHM_INFO:
2969 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2970 if (host_to_target_shm_info(buf, &shm_info))
2971 return -TARGET_EFAULT;
2972 break;
2973 case IPC_RMID:
2974 case SHM_LOCK:
2975 case SHM_UNLOCK:
2976 ret = get_errno(shmctl(shmid, cmd, NULL));
2977 break;
2978 }
2979
2980 return ret;
2981 }
2982
2983 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2984 {
2985 abi_long raddr;
2986 void *host_raddr;
2987 struct shmid_ds shm_info;
2988 int i,ret;
2989
2990 /* find out the length of the shared memory segment */
2991 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2992 if (is_error(ret)) {
2993 /* can't get length, bail out */
2994 return ret;
2995 }
2996
2997 mmap_lock();
2998
2999 if (shmaddr)
3000 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3001 else {
3002 abi_ulong mmap_start;
3003
3004 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3005
3006 if (mmap_start == -1) {
3007 errno = ENOMEM;
3008 host_raddr = (void *)-1;
3009 } else
3010 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3011 }
3012
3013 if (host_raddr == (void *)-1) {
3014 mmap_unlock();
3015 return get_errno((long)host_raddr);
3016 }
3017 raddr=h2g((unsigned long)host_raddr);
3018
3019 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3020 PAGE_VALID | PAGE_READ |
3021 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3022
3023 for (i = 0; i < N_SHM_REGIONS; i++) {
3024 if (shm_regions[i].start == 0) {
3025 shm_regions[i].start = raddr;
3026 shm_regions[i].size = shm_info.shm_segsz;
3027 break;
3028 }
3029 }
3030
3031 mmap_unlock();
3032 return raddr;
3033
3034 }
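/* Note on do_shmat() above: the segment size is queried with IPC_STAT up
 * front so that guest page flags can be set for the whole attached range
 * and the region recorded in shm_regions[] for do_shmdt() to undo later.
 * When the guest supplies no address, mmap_find_vma() picks one and
 * SHM_REMAP is passed, presumably so the attach can replace any placeholder
 * mapping left at that address. */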
3035
3036 static inline abi_long do_shmdt(abi_ulong shmaddr)
3037 {
3038 int i;
3039
3040 for (i = 0; i < N_SHM_REGIONS; ++i) {
3041 if (shm_regions[i].start == shmaddr) {
3042 shm_regions[i].start = 0;
3043 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3044 break;
3045 }
3046 }
3047
3048 return get_errno(shmdt(g2h(shmaddr)));
3049 }
3050
3051 #ifdef TARGET_NR_ipc
3052 /* ??? This only works with linear mappings. */
3053 /* do_ipc() must return target values and target errnos. */
3054 static abi_long do_ipc(unsigned int call, int first,
3055 int second, int third,
3056 abi_long ptr, abi_long fifth)
3057 {
3058 int version;
3059 abi_long ret = 0;
3060
3061 version = call >> 16;
3062 call &= 0xffff;
3063
3064 switch (call) {
3065 case IPCOP_semop:
3066 ret = do_semop(first, ptr, second);
3067 break;
3068
3069 case IPCOP_semget:
3070 ret = get_errno(semget(first, second, third));
3071 break;
3072
3073 case IPCOP_semctl:
3074 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3075 break;
3076
3077 case IPCOP_msgget:
3078 ret = get_errno(msgget(first, second));
3079 break;
3080
3081 case IPCOP_msgsnd:
3082 ret = do_msgsnd(first, ptr, second, third);
3083 break;
3084
3085 case IPCOP_msgctl:
3086 ret = do_msgctl(first, second, ptr);
3087 break;
3088
3089 case IPCOP_msgrcv:
3090 switch (version) {
3091 case 0:
3092 {
3093 struct target_ipc_kludge {
3094 abi_long msgp;
3095 abi_long msgtyp;
3096 } *tmp;
3097
3098 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3099 ret = -TARGET_EFAULT;
3100 break;
3101 }
3102
3103 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3104
3105 unlock_user_struct(tmp, ptr, 0);
3106 break;
3107 }
3108 default:
3109 ret = do_msgrcv(first, ptr, second, fifth, third);
3110 }
3111 break;
3112
3113 case IPCOP_shmat:
3114 switch (version) {
3115 default:
3116 {
3117 abi_ulong raddr;
3118 raddr = do_shmat(first, ptr, second);
3119 if (is_error(raddr))
3120 return get_errno(raddr);
3121 if (put_user_ual(raddr, third))
3122 return -TARGET_EFAULT;
3123 break;
3124 }
3125 case 1:
3126 ret = -TARGET_EINVAL;
3127 break;
3128 }
3129 break;
3130 case IPCOP_shmdt:
3131 ret = do_shmdt(ptr);
3132 break;
3133
3134 case IPCOP_shmget:
3135 /* IPC_* flag values are the same on all linux platforms */
3136 ret = get_errno(shmget(first, second, third));
3137 break;
3138
3139 /* IPC_* and SHM_* command values are the same on all linux platforms */
3140 case IPCOP_shmctl:
3141 ret = do_shmctl(first, second, third);
3142 break;
3143 default:
3144 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3145 ret = -TARGET_ENOSYS;
3146 break;
3147 }
3148 return ret;
3149 }
3150 #endif
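
/* Note on do_ipc() above: the guest packs an IPC "version" into the upper
 * 16 bits of the call number, so e.g. a call value of
 * (IPCOP_msgrcv | (1 << 16)) selects IPCOP_msgrcv with version 1, while
 * version 0 goes through the target_ipc_kludge indirection handled above. */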
3151
3152 /* kernel structure types definitions */
3153
3154 #define STRUCT(name, ...) STRUCT_ ## name,
3155 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3156 enum {
3157 #include "syscall_types.h"
3158 };
3159 #undef STRUCT
3160 #undef STRUCT_SPECIAL
3161
3162 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3163 #define STRUCT_SPECIAL(name)
3164 #include "syscall_types.h"
3165 #undef STRUCT
3166 #undef STRUCT_SPECIAL
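
/* syscall_types.h is included twice with different macro definitions (a
 * classic X-macro).  For a hypothetical entry
 *
 *     STRUCT(example, TYPE_INT, TYPE_LONG)
 *
 * the first inclusion expands it to an enumerator inside the enum above:
 *
 *     STRUCT_example,
 *
 * and the second inclusion expands it to a thunk type description:
 *
 *     static const argtype struct_example_def[] = { TYPE_INT, TYPE_LONG, TYPE_NULL };
 *
 * STRUCT_SPECIAL(name) entries only get the enumerator; their conversion is
 * written by hand elsewhere (the termios handling further down is one such
 * special case). */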
3167
3168 typedef struct IOCTLEntry IOCTLEntry;
3169
3170 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3171 int fd, abi_long cmd, abi_long arg);
3172
3173 struct IOCTLEntry {
3174 unsigned int target_cmd;
3175 unsigned int host_cmd;
3176 const char *name;
3177 int access;
3178 do_ioctl_fn *do_ioctl;
3179 const argtype arg_type[5];
3180 };
3181
3182 #define IOC_R 0x0001
3183 #define IOC_W 0x0002
3184 #define IOC_RW (IOC_R | IOC_W)
3185
3186 #define MAX_STRUCT_SIZE 4096
3187
3188 #ifdef CONFIG_FIEMAP
3189 /* So fiemap access checks don't overflow on 32 bit systems.
3190 * This is very slightly smaller than the limit imposed by
3191 * the underlying kernel.
3192 */
3193 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3194 / sizeof(struct fiemap_extent))
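
/* With fm_extent_count capped at FIEMAP_MAX_EXTENTS, the buffer-size
 * computation below,
 *     outbufsz = sizeof(struct fiemap)
 *              + fm_extent_count * sizeof(struct fiemap_extent),
 * stays <= UINT_MAX and therefore cannot wrap around in a 32-bit size. */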
3195
3196 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3197 int fd, abi_long cmd, abi_long arg)
3198 {
3199 /* The parameter for this ioctl is a struct fiemap followed
3200 * by an array of struct fiemap_extent whose size is set
3201 * in fiemap->fm_extent_count. The array is filled in by the
3202 * ioctl.
3203 */
3204 int target_size_in, target_size_out;
3205 struct fiemap *fm;
3206 const argtype *arg_type = ie->arg_type;
3207 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3208 void *argptr, *p;
3209 abi_long ret;
3210 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3211 uint32_t outbufsz;
3212 int free_fm = 0;
3213
3214 assert(arg_type[0] == TYPE_PTR);
3215 assert(ie->access == IOC_RW);
3216 arg_type++;
3217 target_size_in = thunk_type_size(arg_type, 0);
3218 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3219 if (!argptr) {
3220 return -TARGET_EFAULT;
3221 }
3222 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3223 unlock_user(argptr, arg, 0);
3224 fm = (struct fiemap *)buf_temp;
3225 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3226 return -TARGET_EINVAL;
3227 }
3228
3229 outbufsz = sizeof (*fm) +
3230 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3231
3232 if (outbufsz > MAX_STRUCT_SIZE) {
3233 /* We can't fit all the extents into the fixed size buffer.
3234 * Allocate one that is large enough and use it instead.
3235 */
3236 fm = malloc(outbufsz);
3237 if (!fm) {
3238 return -TARGET_ENOMEM;
3239 }
3240 memcpy(fm, buf_temp, sizeof(struct fiemap));
3241 free_fm = 1;
3242 }
3243 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3244 if (!is_error(ret)) {
3245 target_size_out = target_size_in;
3246 /* An extent_count of 0 means we were only counting the extents
3247 * so there are no structs to copy
3248 */
3249 if (fm->fm_extent_count != 0) {
3250 target_size_out += fm->fm_mapped_extents * extent_size;
3251 }
3252 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3253 if (!argptr) {
3254 ret = -TARGET_EFAULT;
3255 } else {
3256 /* Convert the struct fiemap */
3257 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3258 if (fm->fm_extent_count != 0) {
3259 p = argptr + target_size_in;
3260 /* ...and then all the struct fiemap_extents */
3261 for (i = 0; i < fm->fm_mapped_extents; i++) {
3262 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3263 THUNK_TARGET);
3264 p += extent_size;
3265 }
3266 }
3267 unlock_user(argptr, arg, target_size_out);
3268 }
3269 }
3270 if (free_fm) {
3271 free(fm);
3272 }
3273 return ret;
3274 }
3275 #endif
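
/* Guest argument layout handled by do_ioctl_fs_ioc_fiemap() above (sketch):
 *
 *   arg -> +------------------------------+
 *          | struct fiemap (fixed header) |  converted via ie->arg_type
 *          +------------------------------+
 *          | struct fiemap_extent [0]     |  fm_extent_count slots, filled
 *          | struct fiemap_extent [1]     |  in by the kernel and converted
 *          | ...                          |  back one entry at a time
 *          +------------------------------+
 */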
3276
3277 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3278 int fd, abi_long cmd, abi_long arg)
3279 {
3280 const argtype *arg_type = ie->arg_type;
3281 int target_size;
3282 void *argptr;
3283 int ret;
3284 struct ifconf *host_ifconf;
3285 uint32_t outbufsz;
3286 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3287 int target_ifreq_size;
3288 int nb_ifreq;
3289 int free_buf = 0;
3290 int i;
3291 int target_ifc_len;
3292 abi_long target_ifc_buf;
3293 int host_ifc_len;
3294 char *host_ifc_buf;
3295
3296 assert(arg_type[0] == TYPE_PTR);
3297 assert(ie->access == IOC_RW);
3298
3299 arg_type++;
3300 target_size = thunk_type_size(arg_type, 0);
3301
3302 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3303 if (!argptr)
3304 return -TARGET_EFAULT;
3305 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3306 unlock_user(argptr, arg, 0);
3307
3308 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3309 target_ifc_len = host_ifconf->ifc_len;
3310 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3311
3312 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3313 nb_ifreq = target_ifc_len / target_ifreq_size;
3314 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3315
3316 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3317 if (outbufsz > MAX_STRUCT_SIZE) {
3318 /* We can't fit all the ifreq entries into the fixed size buffer.
3319 * Allocate one that is large enough and use it instead.
3320 */
3321 host_ifconf = malloc(outbufsz);
3322 if (!host_ifconf) {
3323 return -TARGET_ENOMEM;
3324 }
3325 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3326 free_buf = 1;
3327 }
3328 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3329
3330 host_ifconf->ifc_len = host_ifc_len;
3331 host_ifconf->ifc_buf = host_ifc_buf;
3332
3333 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3334 if (!is_error(ret)) {
3335 /* convert host ifc_len to target ifc_len */
3336
3337 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3338 target_ifc_len = nb_ifreq * target_ifreq_size;
3339 host_ifconf->ifc_len = target_ifc_len;
3340
3341 /* restore target ifc_buf */
3342
3343 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3344
3345 /* copy struct ifconf to target user */
3346
3347 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3348 if (!argptr)
3349 return -TARGET_EFAULT;
3350 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3351 unlock_user(argptr, arg, target_size);
3352
3353 /* copy ifreq[] to target user */
3354
3355 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3356 for (i = 0; i < nb_ifreq ; i++) {
3357 thunk_convert(argptr + i * target_ifreq_size,
3358 host_ifc_buf + i * sizeof(struct ifreq),
3359 ifreq_arg_type, THUNK_TARGET);
3360 }
3361 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3362 }
3363
3364 if (free_buf) {
3365 free(host_ifconf);
3366 }
3367
3368 return ret;
3369 }
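
/* Note on do_ioctl_ifconf() above: the target and host struct ifreq need not
 * have the same size, so ifc_len is never copied through directly.  It is
 * translated into a count of entries (nb_ifreq) and rescaled in each
 * direction with the appropriate per-entry size. */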
3370
3371 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3372 abi_long cmd, abi_long arg)
3373 {
3374 void *argptr;
3375 struct dm_ioctl *host_dm;
3376 abi_long guest_data;
3377 uint32_t guest_data_size;
3378 int target_size;
3379 const argtype *arg_type = ie->arg_type;
3380 abi_long ret;
3381 void *big_buf = NULL;
3382 char *host_data;
3383
3384 arg_type++;
3385 target_size = thunk_type_size(arg_type, 0);
3386 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3387 if (!argptr) {
3388 ret = -TARGET_EFAULT;
3389 goto out;
3390 }
3391 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3392 unlock_user(argptr, arg, 0);
3393
3394 /* buf_temp is too small, so fetch things into a bigger buffer */
3395 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3396 memcpy(big_buf, buf_temp, target_size);
3397 buf_temp = big_buf;
3398 host_dm = big_buf;
3399
3400 guest_data = arg + host_dm->data_start;
3401 if ((guest_data - arg) < 0) {
3402 ret = -EINVAL;
3403 goto out;
3404 }
3405 guest_data_size = host_dm->data_size - host_dm->data_start;
3406 host_data = (char*)host_dm + host_dm->data_start;
3407
3408 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3409 switch (ie->host_cmd) {
3410 case DM_REMOVE_ALL:
3411 case DM_LIST_DEVICES:
3412 case DM_DEV_CREATE:
3413 case DM_DEV_REMOVE:
3414 case DM_DEV_SUSPEND:
3415 case DM_DEV_STATUS:
3416 case DM_DEV_WAIT:
3417 case DM_TABLE_STATUS:
3418 case DM_TABLE_CLEAR:
3419 case DM_TABLE_DEPS:
3420 case DM_LIST_VERSIONS:
3421 /* no input data */
3422 break;
3423 case DM_DEV_RENAME:
3424 case DM_DEV_SET_GEOMETRY:
3425 /* data contains only strings */
3426 memcpy(host_data, argptr, guest_data_size);
3427 break;
3428 case DM_TARGET_MSG:
3429 memcpy(host_data, argptr, guest_data_size);
3430 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3431 break;
3432 case DM_TABLE_LOAD:
3433 {
3434 void *gspec = argptr;
3435 void *cur_data = host_data;
3436 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3437 int spec_size = thunk_type_size(arg_type, 0);
3438 int i;
3439
3440 for (i = 0; i < host_dm->target_count; i++) {
3441 struct dm_target_spec *spec = cur_data;
3442 uint32_t next;
3443 int slen;
3444
3445 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3446 slen = strlen((char*)gspec + spec_size) + 1;
3447 next = spec->next;
3448 spec->next = sizeof(*spec) + slen;
3449 strcpy((char*)&spec[1], gspec + spec_size);
3450 gspec += next;
3451 cur_data += spec->next;
3452 }
3453 break;
3454 }
3455 default:
3456 ret = -TARGET_EINVAL;
3457 goto out;
3458 }
3459 unlock_user(argptr, guest_data, 0);
3460
3461 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3462 if (!is_error(ret)) {
3463 guest_data = arg + host_dm->data_start;
3464 guest_data_size = host_dm->data_size - host_dm->data_start;
3465 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3466 switch (ie->host_cmd) {
3467 case DM_REMOVE_ALL:
3468 case DM_DEV_CREATE:
3469 case DM_DEV_REMOVE:
3470 case DM_DEV_RENAME:
3471 case DM_DEV_SUSPEND:
3472 case DM_DEV_STATUS:
3473 case DM_TABLE_LOAD:
3474 case DM_TABLE_CLEAR:
3475 case DM_TARGET_MSG:
3476 case DM_DEV_SET_GEOMETRY:
3477 /* no return data */
3478 break;
3479 case DM_LIST_DEVICES:
3480 {
3481 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3482 uint32_t remaining_data = guest_data_size;
3483 void *cur_data = argptr;
3484 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3485 int nl_size = 12; /* can't use thunk_type_size() here due to alignment */
3486
3487 while (1) {
3488 uint32_t next = nl->next;
3489 if (next) {
3490 nl->next = nl_size + (strlen(nl->name) + 1);
3491 }
3492 if (remaining_data < nl->next) {
3493 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3494 break;
3495 }
3496 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3497 strcpy(cur_data + nl_size, nl->name);
3498 cur_data += nl->next;
3499 remaining_data -= nl->next;
3500 if (!next) {
3501 break;
3502 }
3503 nl = (void*)nl + next;
3504 }
3505 break;
3506 }
3507 case DM_DEV_WAIT:
3508 case DM_TABLE_STATUS:
3509 {
3510 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3511 void *cur_data = argptr;
3512 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3513 int spec_size = thunk_type_size(arg_type, 0);
3514 int i;
3515
3516 for (i = 0; i < host_dm->target_count; i++) {
3517 uint32_t next = spec->next;
3518 int slen = strlen((char*)&spec[1]) + 1;
3519 spec->next = (cur_data - argptr) + spec_size + slen;
3520 if (guest_data_size < spec->next) {
3521 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3522 break;
3523 }
3524 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3525 strcpy(cur_data + spec_size, (char*)&spec[1]);
3526 cur_data = argptr + spec->next;
3527 spec = (void*)host_dm + host_dm->data_start + next;
3528 }
3529 break;
3530 }
3531 case DM_TABLE_DEPS:
3532 {
3533 void *hdata = (void*)host_dm + host_dm->data_start;
3534 int count = *(uint32_t*)hdata;
3535 uint64_t *hdev = hdata + 8;
3536 uint64_t *gdev = argptr + 8;
3537 int i;
3538
3539 *(uint32_t*)argptr = tswap32(count);
3540 for (i = 0; i < count; i++) {
3541 *gdev = tswap64(*hdev);
3542 gdev++;
3543 hdev++;
3544 }
3545 break;
3546 }
3547 case DM_LIST_VERSIONS:
3548 {
3549 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3550 uint32_t remaining_data = guest_data_size;
3551 void *cur_data = argptr;
3552 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3553 int vers_size = thunk_type_size(arg_type, 0);
3554
3555 while (1) {
3556 uint32_t next = vers->next;
3557 if (next) {
3558 vers->next = vers_size + (strlen(vers->name) + 1);
3559 }
3560 if (remaining_data < vers->next) {
3561 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3562 break;
3563 }
3564 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3565 strcpy(cur_data + vers_size, vers->name);
3566 cur_data += vers->next;
3567 remaining_data -= vers->next;
3568 if (!next) {
3569 break;
3570 }
3571 vers = (void*)vers + next;
3572 }
3573 break;
3574 }
3575 default:
3576 ret = -TARGET_EINVAL;
3577 goto out;
3578 }
3579 unlock_user(argptr, guest_data, guest_data_size);
3580
3581 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3582 if (!argptr) {
3583 ret = -TARGET_EFAULT;
3584 goto out;
3585 }
3586 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3587 unlock_user(argptr, arg, target_size);
3588 }
3589 out:
3590 if (big_buf) {
3591 free(big_buf);
3592 }
3593 return ret;
3594 }
3595
3596 static IOCTLEntry ioctl_entries[] = {
3597 #define IOCTL(cmd, access, ...) \
3598 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3599 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3600 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3601 #include "ioctls.h"
3602 { 0, 0, },
3603 };
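
/* ioctls.h supplies one IOCTL()/IOCTL_SPECIAL() line per supported request.
 * As a rough, hypothetical illustration, an entry such as
 *
 *     IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG))
 *
 * would expand, per the macros above, to
 *
 *     { TARGET_BLKGETSIZE, BLKGETSIZE, "BLKGETSIZE", IOC_R, 0,
 *       { MK_PTR(TYPE_ULONG) } },
 *
 * while IOCTL_SPECIAL() additionally names a do_ioctl_fn callback (such as
 * do_ioctl_fs_ioc_fiemap above) that replaces the generic conversion. */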
3604
3605 /* ??? Implement proper locking for ioctls. */
3606 /* do_ioctl() Must return target values and target errnos. */
3607 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3608 {
3609 const IOCTLEntry *ie;
3610 const argtype *arg_type;
3611 abi_long ret;
3612 uint8_t buf_temp[MAX_STRUCT_SIZE];
3613 int target_size;
3614 void *argptr;
3615
3616 ie = ioctl_entries;
3617 for(;;) {
3618 if (ie->target_cmd == 0) {
3619 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3620 return -TARGET_ENOSYS;
3621 }
3622 if (ie->target_cmd == cmd)
3623 break;
3624 ie++;
3625 }
3626 arg_type = ie->arg_type;
3627 #if defined(DEBUG)
3628 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3629 #endif
3630 if (ie->do_ioctl) {
3631 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3632 }
3633
3634 switch(arg_type[0]) {
3635 case TYPE_NULL:
3636 /* no argument */
3637 ret = get_errno(ioctl(fd, ie->host_cmd));
3638 break;
3639 case TYPE_PTRVOID:
3640 case TYPE_INT:
3641 /* int argument */
3642 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3643 break;
3644 case TYPE_PTR:
3645 arg_type++;
3646 target_size = thunk_type_size(arg_type, 0);
3647 switch(ie->access) {
3648 case IOC_R:
3649 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3650 if (!is_error(ret)) {
3651 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3652 if (!argptr)
3653 return -TARGET_EFAULT;
3654 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3655 unlock_user(argptr, arg, target_size);
3656 }
3657 break;
3658 case IOC_W:
3659 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3660 if (!argptr)
3661 return -TARGET_EFAULT;
3662 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3663 unlock_user(argptr, arg, 0);
3664 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3665 break;
3666 default:
3667 case IOC_RW:
3668 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3669 if (!argptr)
3670 return -TARGET_EFAULT;
3671 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3672 unlock_user(argptr, arg, 0);
3673 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3674 if (!is_error(ret)) {
3675 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3676 if (!argptr)
3677 return -TARGET_EFAULT;
3678 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3679 unlock_user(argptr, arg, target_size);
3680 }
3681 break;
3682 }
3683 break;
3684 default:
3685 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3686 (long)cmd, arg_type[0]);
3687 ret = -TARGET_ENOSYS;
3688 break;
3689 }
3690 return ret;
3691 }
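
/* Summary of the generic path in do_ioctl() above: IOC_W arguments are
 * converted guest-to-host before the host ioctl, IOC_R arguments are
 * converted host-to-guest afterwards (only on success), and IOC_RW does
 * both.  Entries that provide a do_ioctl callback bypass this machinery
 * entirely. */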
3692
3693 static const bitmask_transtbl iflag_tbl[] = {
3694 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3695 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3696 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3697 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3698 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3699 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3700 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3701 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3702 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3703 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3704 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3705 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3706 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3707 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3708 { 0, 0, 0, 0 }
3709 };
3710
3711 static const bitmask_transtbl oflag_tbl[] = {
3712 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3713 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3714 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3715 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3716 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3717 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3718 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3719 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3720 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3721 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3722 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3723 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3724 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3725 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3726 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3727 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3728 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3729 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3730 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3731 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3732 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3733 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3734 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3735 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3736 { 0, 0, 0, 0 }
3737 };
3738
3739 static const bitmask_transtbl cflag_tbl[] = {
3740 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3741 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3742 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3743 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3744 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3745 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3746 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3747 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3748 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3749 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3750 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3751 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3752 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3753 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3754 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3755 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3756 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3757 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3758 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3759 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3760 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3761 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3762 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3763 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3764 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3765 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3766 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3767 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3768 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3769 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3770 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3771 { 0, 0, 0, 0 }
3772 };
3773
3774 static const bitmask_transtbl lflag_tbl[] = {
3775 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3776 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3777 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3778 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3779 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3780 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3781 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3782 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3783 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3784 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3785 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3786 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3787 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3788 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3789 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3790 { 0, 0, 0, 0 }
3791 };
3792
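/* A rough sketch of how the flag tables above are consumed (the
   bitmask_transtbl helpers are defined earlier in this file; the field
   names here are only illustrative): each { target_mask, target_bits,
   host_mask, host_bits } row is applied roughly as

       if ((target_flags & target_mask) == target_bits)
           host_flags |= host_bits;

   so single-bit flags map one-to-one, while multi-bit fields such as
   CSIZE or CBAUD are matched and translated as whole groups. */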
3793 static void target_to_host_termios (void *dst, const void *src)
3794 {
3795 struct host_termios *host = dst;
3796 const struct target_termios *target = src;
3797
3798 host->c_iflag =
3799 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3800 host->c_oflag =
3801 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3802 host->c_cflag =
3803 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3804 host->c_lflag =
3805 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3806 host->c_line = target->c_line;
3807
3808 memset(host->c_cc, 0, sizeof(host->c_cc));
3809 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3810 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3811 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3812 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3813 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3814 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3815 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3816 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3817 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3818 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3819 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3820 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3821 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3822 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3823 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3824 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3825 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3826 }
3827
3828 static void host_to_target_termios (void *dst, const void *src)
3829 {
3830 struct target_termios *target = dst;
3831 const struct host_termios *host = src;
3832
3833 target->c_iflag =
3834 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3835 target->c_oflag =
3836 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3837 target->c_cflag =
3838 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3839 target->c_lflag =
3840 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3841 target->c_line = host->c_line;
3842
3843 memset(target->c_cc, 0, sizeof(target->c_cc));
3844 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3845 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3846 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3847 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3848 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3849 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3850 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3851 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3852 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3853 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3854 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3855 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3856 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3857 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3858 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3859 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3860 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3861 }
3862
3863 static const StructEntry struct_termios_def = {
3864 .convert = { host_to_target_termios, target_to_host_termios },
3865 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3866 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3867 };
3868
3869 static bitmask_transtbl mmap_flags_tbl[] = {
3870 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3871 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3872 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3873 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3874 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3875 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3876 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3877 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3878 { 0, 0, 0, 0 }
3879 };
3880
3881 #if defined(TARGET_I386)
3882
3883 /* NOTE: there is really only one LDT, shared by all threads */
3884 static uint8_t *ldt_table;
3885
3886 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3887 {
3888 int size;
3889 void *p;
3890
3891 if (!ldt_table)
3892 return 0;
3893 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3894 if (size > bytecount)
3895 size = bytecount;
3896 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3897 if (!p)
3898 return -TARGET_EFAULT;
3899 /* ??? Should this be byteswapped? */
3900 memcpy(p, ldt_table, size);
3901 unlock_user(p, ptr, size);
3902 return size;
3903 }
3904
3905 /* XXX: add locking support */
3906 static abi_long write_ldt(CPUX86State *env,
3907 abi_ulong ptr, unsigned long bytecount, int oldmode)
3908 {
3909 struct target_modify_ldt_ldt_s ldt_info;
3910 struct target_modify_ldt_ldt_s *target_ldt_info;
3911 int seg_32bit, contents, read_exec_only, limit_in_pages;
3912 int seg_not_present, useable, lm;
3913 uint32_t *lp, entry_1, entry_2;
3914
3915 if (bytecount != sizeof(ldt_info))
3916 return -TARGET_EINVAL;
3917 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3918 return -TARGET_EFAULT;
3919 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3920 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3921 ldt_info.limit = tswap32(target_ldt_info->limit);
3922 ldt_info.flags = tswap32(target_ldt_info->flags);
3923 unlock_user_struct(target_ldt_info, ptr, 0);
3924
3925 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3926 return -TARGET_EINVAL;
3927 seg_32bit = ldt_info.flags & 1;
3928 contents = (ldt_info.flags >> 1) & 3;
3929 read_exec_only = (ldt_info.flags >> 3) & 1;
3930 limit_in_pages = (ldt_info.flags >> 4) & 1;
3931 seg_not_present = (ldt_info.flags >> 5) & 1;
3932 useable = (ldt_info.flags >> 6) & 1;
3933 #ifdef TARGET_ABI32
3934 lm = 0;
3935 #else
3936 lm = (ldt_info.flags >> 7) & 1;
3937 #endif
3938 if (contents == 3) {
3939 if (oldmode)
3940 return -TARGET_EINVAL;
3941 if (seg_not_present == 0)
3942 return -TARGET_EINVAL;
3943 }
3944 /* allocate the LDT */
3945 if (!ldt_table) {
3946 env->ldt.base = target_mmap(0,
3947 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3948 PROT_READ|PROT_WRITE,
3949 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3950 if (env->ldt.base == -1)
3951 return -TARGET_ENOMEM;
3952 memset(g2h(env->ldt.base), 0,
3953 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3954 env->ldt.limit = 0xffff;
3955 ldt_table = g2h(env->ldt.base);
3956 }
3957
3958 /* NOTE: same code as Linux kernel */
3959 /* Allow LDTs to be cleared by the user. */
3960 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3961 if (oldmode ||
3962 (contents == 0 &&
3963 read_exec_only == 1 &&
3964 seg_32bit == 0 &&
3965 limit_in_pages == 0 &&
3966 seg_not_present == 1 &&
3967 useable == 0 )) {
3968 entry_1 = 0;
3969 entry_2 = 0;
3970 goto install;
3971 }
3972 }
3973
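/* Pack the fields into a standard x86 segment descriptor, noted here
   for reference: the low 32 bits hold base[15:0] (upper half) and
   limit[15:0] (lower half); the high 32 bits hold base[31:24],
   G/D/L/AVL in bits 23-20, limit[19:16] in bits 19-16, the access
   byte (present, DPL, S, type) in bits 15-8 and base[23:16] in bits
   7-0. The constant 0x7000 sets DPL=3 and the S (code/data) bit; the
   present bit is derived from seg_not_present. */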
3974 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3975 (ldt_info.limit & 0x0ffff);
3976 entry_2 = (ldt_info.base_addr & 0xff000000) |
3977 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3978 (ldt_info.limit & 0xf0000) |
3979 ((read_exec_only ^ 1) << 9) |
3980 (contents << 10) |
3981 ((seg_not_present ^ 1) << 15) |
3982 (seg_32bit << 22) |
3983 (limit_in_pages << 23) |
3984 (lm << 21) |
3985 0x7000;
3986 if (!oldmode)
3987 entry_2 |= (useable << 20);
3988
3989 /* Install the new entry ... */
3990 install:
3991 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3992 lp[0] = tswap32(entry_1);
3993 lp[1] = tswap32(entry_2);
3994 return 0;
3995 }
3996
3997 /* specific and weird i386 syscalls */
3998 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3999 unsigned long bytecount)
4000 {
4001 abi_long ret;
4002
4003 switch (func) {
4004 case 0:
4005 ret = read_ldt(ptr, bytecount);
4006 break;
4007 case 1:
4008 ret = write_ldt(env, ptr, bytecount, 1);
4009 break;
4010 case 0x11:
4011 ret = write_ldt(env, ptr, bytecount, 0);
4012 break;
4013 default:
4014 ret = -TARGET_ENOSYS;
4015 break;
4016 }
4017 return ret;
4018 }
4019
4020 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4021 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4022 {
4023 uint64_t *gdt_table = g2h(env->gdt.base);
4024 struct target_modify_ldt_ldt_s ldt_info;
4025 struct target_modify_ldt_ldt_s *target_ldt_info;
4026 int seg_32bit, contents, read_exec_only, limit_in_pages;
4027 int seg_not_present, useable, lm;
4028 uint32_t *lp, entry_1, entry_2;
4029 int i;
4030
4031 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4032 if (!target_ldt_info)
4033 return -TARGET_EFAULT;
4034 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4035 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4036 ldt_info.limit = tswap32(target_ldt_info->limit);
4037 ldt_info.flags = tswap32(target_ldt_info->flags);
4038 if (ldt_info.entry_number == -1) {
4039 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4040 if (gdt_table[i] == 0) {
4041 ldt_info.entry_number = i;
4042 target_ldt_info->entry_number = tswap32(i);
4043 break;
4044 }
4045 }
4046 }
4047 unlock_user_struct(target_ldt_info, ptr, 1);
4048
4049 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4050 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4051 return -TARGET_EINVAL;
4052 seg_32bit = ldt_info.flags & 1;
4053 contents = (ldt_info.flags >> 1) & 3;
4054 read_exec_only = (ldt_info.flags >> 3) & 1;
4055 limit_in_pages = (ldt_info.flags >> 4) & 1;
4056 seg_not_present = (ldt_info.flags >> 5) & 1;
4057 useable = (ldt_info.flags >> 6) & 1;
4058 #ifdef TARGET_ABI32
4059 lm = 0;
4060 #else
4061 lm = (ldt_info.flags >> 7) & 1;
4062 #endif
4063
4064 if (contents == 3) {
4065 if (seg_not_present == 0)
4066 return -TARGET_EINVAL;
4067 }
4068
4069 /* NOTE: same code as Linux kernel */
4070 /* Allow LDTs to be cleared by the user. */
4071 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4072 if ((contents == 0 &&
4073 read_exec_only == 1 &&
4074 seg_32bit == 0 &&
4075 limit_in_pages == 0 &&
4076 seg_not_present == 1 &&
4077 useable == 0 )) {
4078 entry_1 = 0;
4079 entry_2 = 0;
4080 goto install;
4081 }
4082 }
4083
4084 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4085 (ldt_info.limit & 0x0ffff);
4086 entry_2 = (ldt_info.base_addr & 0xff000000) |
4087 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4088 (ldt_info.limit & 0xf0000) |
4089 ((read_exec_only ^ 1) << 9) |
4090 (contents << 10) |
4091 ((seg_not_present ^ 1) << 15) |
4092 (seg_32bit << 22) |
4093 (limit_in_pages << 23) |
4094 (useable << 20) |
4095 (lm << 21) |
4096 0x7000;
4097
4098 /* Install the new entry ... */
4099 install:
4100 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4101 lp[0] = tswap32(entry_1);
4102 lp[1] = tswap32(entry_2);
4103 return 0;
4104 }
4105
4106 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4107 {
4108 struct target_modify_ldt_ldt_s *target_ldt_info;
4109 uint64_t *gdt_table = g2h(env->gdt.base);
4110 uint32_t base_addr, limit, flags;
4111 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4112 int seg_not_present, useable, lm;
4113 uint32_t *lp, entry_1, entry_2;
4114
4115 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4116 if (!target_ldt_info)
4117 return -TARGET_EFAULT;
4118 idx = tswap32(target_ldt_info->entry_number);
4119 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4120 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4121 unlock_user_struct(target_ldt_info, ptr, 1);
4122 return -TARGET_EINVAL;
4123 }
4124 lp = (uint32_t *)(gdt_table + idx);
4125 entry_1 = tswap32(lp[0]);
4126 entry_2 = tswap32(lp[1]);
4127
4128 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4129 contents = (entry_2 >> 10) & 3;
4130 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4131 seg_32bit = (entry_2 >> 22) & 1;
4132 limit_in_pages = (entry_2 >> 23) & 1;
4133 useable = (entry_2 >> 20) & 1;
4134 #ifdef TARGET_ABI32
4135 lm = 0;
4136 #else
4137 lm = (entry_2 >> 21) & 1;
4138 #endif
4139 flags = (seg_32bit << 0) | (contents << 1) |
4140 (read_exec_only << 3) | (limit_in_pages << 4) |
4141 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4142 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4143 base_addr = (entry_1 >> 16) |
4144 (entry_2 & 0xff000000) |
4145 ((entry_2 & 0xff) << 16);
4146 target_ldt_info->base_addr = tswapal(base_addr);
4147 target_ldt_info->limit = tswap32(limit);
4148 target_ldt_info->flags = tswap32(flags);
4149 unlock_user_struct(target_ldt_info, ptr, 1);
4150 return 0;
4151 }
4152 #endif /* TARGET_I386 && TARGET_ABI32 */
4153
4154 #ifndef TARGET_ABI32
4155 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4156 {
4157 abi_long ret = 0;
4158 abi_ulong val;
4159 int idx;
4160
4161 switch(code) {
4162 case TARGET_ARCH_SET_GS:
4163 case TARGET_ARCH_SET_FS:
4164 if (code == TARGET_ARCH_SET_GS)
4165 idx = R_GS;
4166 else
4167 idx = R_FS;
4168 cpu_x86_load_seg(env, idx, 0);
4169 env->segs[idx].base = addr;
4170 break;
4171 case TARGET_ARCH_GET_GS:
4172 case TARGET_ARCH_GET_FS:
4173 if (code == TARGET_ARCH_GET_GS)
4174 idx = R_GS;
4175 else
4176 idx = R_FS;
4177 val = env->segs[idx].base;
4178 if (put_user(val, addr, abi_ulong))
4179 ret = -TARGET_EFAULT;
4180 break;
4181 default:
4182 ret = -TARGET_EINVAL;
4183 break;
4184 }
4185 return ret;
4186 }
4187 #endif
4188
4189 #endif /* defined(TARGET_I386) */
4190
4191 #define NEW_STACK_SIZE 0x40000
4192
4193 #if defined(CONFIG_USE_NPTL)
4194
4195 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4196 typedef struct {
4197 CPUArchState *env;
4198 pthread_mutex_t mutex;
4199 pthread_cond_t cond;
4200 pthread_t thread;
4201 uint32_t tid;
4202 abi_ulong child_tidptr;
4203 abi_ulong parent_tidptr;
4204 sigset_t sigmask;
4205 } new_thread_info;
4206
4207 static void *clone_func(void *arg)
4208 {
4209 new_thread_info *info = arg;
4210 CPUArchState *env;
4211 TaskState *ts;
4212
4213 env = info->env;
4214 thread_env = env;
4215 ts = (TaskState *)thread_env->opaque;
4216 info->tid = gettid();
4217 env->host_tid = info->tid;
4218 task_settid(ts);
4219 if (info->child_tidptr)
4220 put_user_u32(info->tid, info->child_tidptr);
4221 if (info->parent_tidptr)
4222 put_user_u32(info->tid, info->parent_tidptr);
4223 /* Enable signals. */
4224 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4225 /* Signal to the parent that we're ready. */
4226 pthread_mutex_lock(&info->mutex);
4227 pthread_cond_broadcast(&info->cond);
4228 pthread_mutex_unlock(&info->mutex);
4229 /* Wait until the parent has finished initializing the TLS state. */
4230 pthread_mutex_lock(&clone_lock);
4231 pthread_mutex_unlock(&clone_lock);
4232 cpu_loop(env);
4233 /* never exits */
4234 return NULL;
4235 }
4236 #else
4237
4238 static int clone_func(void *arg)
4239 {
4240 CPUArchState *env = arg;
4241 cpu_loop(env);
4242 /* never exits */
4243 return 0;
4244 }
4245 #endif
4246
4247 /* do_fork() must return host values and target errnos (unlike most
4248 other do_*() functions). */
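/* Rough flow of do_fork() below: with CONFIG_USE_NPTL a CLONE_VM
   request becomes a host pthread running cpu_loop() on a copied CPU
   state, serialized against clone_lock so that TLS setup appears
   atomic to the child; without CLONE_VM the request is treated as a
   plain fork() and bracketed by fork_start()/fork_end(). */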
4249 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4250 abi_ulong parent_tidptr, target_ulong newtls,
4251 abi_ulong child_tidptr)
4252 {
4253 int ret;
4254 TaskState *ts;
4255 CPUArchState *new_env;
4256 #if defined(CONFIG_USE_NPTL)
4257 unsigned int nptl_flags;
4258 sigset_t sigmask;
4259 #else
4260 uint8_t *new_stack;
4261 #endif
4262
4263 /* Emulate vfork() with fork() */
4264 if (flags & CLONE_VFORK)
4265 flags &= ~(CLONE_VFORK | CLONE_VM);
4266
4267 if (flags & CLONE_VM) {
4268 TaskState *parent_ts = (TaskState *)env->opaque;
4269 #if defined(CONFIG_USE_NPTL)
4270 new_thread_info info;
4271 pthread_attr_t attr;
4272 #endif
4273 ts = g_malloc0(sizeof(TaskState));
4274 init_task_state(ts);
4275 /* we create a new CPU instance. */
4276 new_env = cpu_copy(env);
4277 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4278 cpu_reset(ENV_GET_CPU(new_env));
4279 #endif
4280 /* Init regs that differ from the parent. */
4281 cpu_clone_regs(new_env, newsp);
4282 new_env->opaque = ts;
4283 ts->bprm = parent_ts->bprm;
4284 ts->info = parent_ts->info;
4285 #if defined(CONFIG_USE_NPTL)
4286 nptl_flags = flags;
4287 flags &= ~CLONE_NPTL_FLAGS2;
4288
4289 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4290 ts->child_tidptr = child_tidptr;
4291 }
4292
4293 if (nptl_flags & CLONE_SETTLS)
4294 cpu_set_tls (new_env, newtls);
4295
4296 /* Grab a mutex so that thread setup appears atomic. */
4297 pthread_mutex_lock(&clone_lock);
4298
4299 memset(&info, 0, sizeof(info));
4300 pthread_mutex_init(&info.mutex, NULL);
4301 pthread_mutex_lock(&info.mutex);
4302 pthread_cond_init(&info.cond, NULL);
4303 info.env = new_env;
4304 if (nptl_flags & CLONE_CHILD_SETTID)
4305 info.child_tidptr = child_tidptr;
4306 if (nptl_flags & CLONE_PARENT_SETTID)
4307 info.parent_tidptr = parent_tidptr;
4308
4309 ret = pthread_attr_init(&attr);
4310 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4311 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4312 /* It is not safe to deliver signals until the child has finished
4313 initializing, so temporarily block all signals. */
4314 sigfillset(&sigmask);
4315 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4316
4317 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4318 /* TODO: Free new CPU state if thread creation failed. */
4319
4320 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4321 pthread_attr_destroy(&attr);
4322 if (ret == 0) {
4323 /* Wait for the child to initialize. */
4324 pthread_cond_wait(&info.cond, &info.mutex);
4325 ret = info.tid;
4326 if (flags & CLONE_PARENT_SETTID)
4327 put_user_u32(ret, parent_tidptr);
4328 } else {
4329 ret = -1;
4330 }
4331 pthread_mutex_unlock(&info.mutex);
4332 pthread_cond_destroy(&info.cond);
4333 pthread_mutex_destroy(&info.mutex);
4334 pthread_mutex_unlock(&clone_lock);
4335 #else
4336 if (flags & CLONE_NPTL_FLAGS2)
4337 return -EINVAL;
4338 /* This is probably going to die very quickly, but do it anyway. */
4339 new_stack = g_malloc0 (NEW_STACK_SIZE);
4340 #ifdef __ia64__
4341 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4342 #else
4343 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4344 #endif
4345 #endif
4346 } else {
4347 /* if no CLONE_VM, we consider it is a fork */
4348 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4349 return -EINVAL;
4350 fork_start();
4351 ret = fork();
4352 if (ret == 0) {
4353 /* Child Process. */
4354 cpu_clone_regs(env, newsp);
4355 fork_end(1);
4356 #if defined(CONFIG_USE_NPTL)
4357 /* There is a race condition here. The parent process could
4358 theoretically read the TID in the child process before the child
4359 tid is set. This would require using either ptrace
4360 (not implemented) or having *_tidptr point at a shared memory
4361 mapping. We can't repeat the spinlock hack used above because
4362 the child process gets its own copy of the lock. */
4363 if (flags & CLONE_CHILD_SETTID)
4364 put_user_u32(gettid(), child_tidptr);
4365 if (flags & CLONE_PARENT_SETTID)
4366 put_user_u32(gettid(), parent_tidptr);
4367 ts = (TaskState *)env->opaque;
4368 if (flags & CLONE_SETTLS)
4369 cpu_set_tls (env, newtls);
4370 if (flags & CLONE_CHILD_CLEARTID)
4371 ts->child_tidptr = child_tidptr;
4372 #endif
4373 } else {
4374 fork_end(0);
4375 }
4376 }
4377 return ret;
4378 }
4379
4380 /* warning: doesn't handle Linux-specific flags... */
4381 static int target_to_host_fcntl_cmd(int cmd)
4382 {
4383 switch(cmd) {
4384 case TARGET_F_DUPFD:
4385 case TARGET_F_GETFD:
4386 case TARGET_F_SETFD:
4387 case TARGET_F_GETFL:
4388 case TARGET_F_SETFL:
4389 return cmd;
4390 case TARGET_F_GETLK:
4391 return F_GETLK;
4392 case TARGET_F_SETLK:
4393 return F_SETLK;
4394 case TARGET_F_SETLKW:
4395 return F_SETLKW;
4396 case TARGET_F_GETOWN:
4397 return F_GETOWN;
4398 case TARGET_F_SETOWN:
4399 return F_SETOWN;
4400 case TARGET_F_GETSIG:
4401 return F_GETSIG;
4402 case TARGET_F_SETSIG:
4403 return F_SETSIG;
4404 #if TARGET_ABI_BITS == 32
4405 case TARGET_F_GETLK64:
4406 return F_GETLK64;
4407 case TARGET_F_SETLK64:
4408 return F_SETLK64;
4409 case TARGET_F_SETLKW64:
4410 return F_SETLKW64;
4411 #endif
4412 case TARGET_F_SETLEASE:
4413 return F_SETLEASE;
4414 case TARGET_F_GETLEASE:
4415 return F_GETLEASE;
4416 #ifdef F_DUPFD_CLOEXEC
4417 case TARGET_F_DUPFD_CLOEXEC:
4418 return F_DUPFD_CLOEXEC;
4419 #endif
4420 case TARGET_F_NOTIFY:
4421 return F_NOTIFY;
4422 default:
4423 return -TARGET_EINVAL;
4424 }
4425 return -TARGET_EINVAL;
4426 }
4427
4428 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4429 {
4430 struct flock fl;
4431 struct target_flock *target_fl;
4432 struct flock64 fl64;
4433 struct target_flock64 *target_fl64;
4434 abi_long ret;
4435 int host_cmd = target_to_host_fcntl_cmd(cmd);
4436
4437 if (host_cmd == -TARGET_EINVAL)
4438 return host_cmd;
4439
4440 switch(cmd) {
4441 case TARGET_F_GETLK:
4442 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4443 return -TARGET_EFAULT;
4444 fl.l_type = tswap16(target_fl->l_type);
4445 fl.l_whence = tswap16(target_fl->l_whence);
4446 fl.l_start = tswapal(target_fl->l_start);
4447 fl.l_len = tswapal(target_fl->l_len);
4448 fl.l_pid = tswap32(target_fl->l_pid);
4449 unlock_user_struct(target_fl, arg, 0);
4450 ret = get_errno(fcntl(fd, host_cmd, &fl));
4451 if (ret == 0) {
4452 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4453 return -TARGET_EFAULT;
4454 target_fl->l_type = tswap16(fl.l_type);
4455 target_fl->l_whence = tswap16(fl.l_whence);
4456 target_fl->l_start = tswapal(fl.l_start);
4457 target_fl->l_len = tswapal(fl.l_len);
4458 target_fl->l_pid = tswap32(fl.l_pid);
4459 unlock_user_struct(target_fl, arg, 1);
4460 }
4461 break;
4462
4463 case TARGET_F_SETLK:
4464 case TARGET_F_SETLKW:
4465 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4466 return -TARGET_EFAULT;
4467 fl.l_type = tswap16(target_fl->l_type);
4468 fl.l_whence = tswap16(target_fl->l_whence);
4469 fl.l_start = tswapal(target_fl->l_start);
4470 fl.l_len = tswapal(target_fl->l_len);
4471 fl.l_pid = tswap32(target_fl->l_pid);
4472 unlock_user_struct(target_fl, arg, 0);
4473 ret = get_errno(fcntl(fd, host_cmd, &fl));
4474 break;
4475
4476 case TARGET_F_GETLK64:
4477 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4478 return -TARGET_EFAULT;
4479 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4480 fl64.l_whence = tswap16(target_fl64->l_whence);
4481 fl64.l_start = tswap64(target_fl64->l_start);
4482 fl64.l_len = tswap64(target_fl64->l_len);
4483 fl64.l_pid = tswap32(target_fl64->l_pid);
4484 unlock_user_struct(target_fl64, arg, 0);
4485 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4486 if (ret == 0) {
4487 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4488 return -TARGET_EFAULT;
4489 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4490 target_fl64->l_whence = tswap16(fl64.l_whence);
4491 target_fl64->l_start = tswap64(fl64.l_start);
4492 target_fl64->l_len = tswap64(fl64.l_len);
4493 target_fl64->l_pid = tswap32(fl64.l_pid);
4494 unlock_user_struct(target_fl64, arg, 1);
4495 }
4496 break;
4497 case TARGET_F_SETLK64:
4498 case TARGET_F_SETLKW64:
4499 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4500 return -TARGET_EFAULT;
4501 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4502 fl64.l_whence = tswap16(target_fl64->l_whence);
4503 fl64.l_start = tswap64(target_fl64->l_start);
4504 fl64.l_len = tswap64(target_fl64->l_len);
4505 fl64.l_pid = tswap32(target_fl64->l_pid);
4506 unlock_user_struct(target_fl64, arg, 0);
4507 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4508 break;
4509
4510 case TARGET_F_GETFL:
4511 ret = get_errno(fcntl(fd, host_cmd, arg));
4512 if (ret >= 0) {
4513 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4514 }
4515 break;
4516
4517 case TARGET_F_SETFL:
4518 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4519 break;
4520
4521 case TARGET_F_SETOWN:
4522 case TARGET_F_GETOWN:
4523 case TARGET_F_SETSIG:
4524 case TARGET_F_GETSIG:
4525 case TARGET_F_SETLEASE:
4526 case TARGET_F_GETLEASE:
4527 ret = get_errno(fcntl(fd, host_cmd, arg));
4528 break;
4529
4530 default:
4531 ret = get_errno(fcntl(fd, cmd, arg));
4532 break;
4533 }
4534 return ret;
4535 }
4536
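/* Helpers for targets whose legacy syscalls use 16-bit uid_t/gid_t
   (USE_UID16). Host IDs that do not fit in 16 bits are clamped to
   65534, the conventional overflow/"nobody" value (e.g. a host uid of
   100000 is reported to the guest as 65534), while the special value
   -1 is preserved when widening a 16-bit ID back to the host size. */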
4537 #ifdef USE_UID16
4538
4539 static inline int high2lowuid(int uid)
4540 {
4541 if (uid > 65535)
4542 return 65534;
4543 else
4544 return uid;
4545 }
4546
4547 static inline int high2lowgid(int gid)
4548 {
4549 if (gid > 65535)
4550 return 65534;
4551 else
4552 return gid;
4553 }
4554
4555 static inline int low2highuid(int uid)
4556 {
4557 if ((int16_t)uid == -1)
4558 return -1;
4559 else
4560 return uid;
4561 }
4562
4563 static inline int low2highgid(int gid)
4564 {
4565 if ((int16_t)gid == -1)
4566 return -1;
4567 else
4568 return gid;
4569 }
4570 static inline int tswapid(int id)
4571 {
4572 return tswap16(id);
4573 }
4574 #else /* !USE_UID16 */
4575 static inline int high2lowuid(int uid)
4576 {
4577 return uid;
4578 }
4579 static inline int high2lowgid(int gid)
4580 {
4581 return gid;
4582 }
4583 static inline int low2highuid(int uid)
4584 {
4585 return uid;
4586 }
4587 static inline int low2highgid(int gid)
4588 {
4589 return gid;
4590 }
4591 static inline int tswapid(int id)
4592 {
4593 return tswap32(id);
4594 }
4595 #endif /* USE_UID16 */
4596
4597 void syscall_init(void)
4598 {
4599 IOCTLEntry *ie;
4600 const argtype *arg_type;
4601 int size;
4602 int i;
4603
4604 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4605 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4606 #include "syscall_types.h"
4607 #undef STRUCT
4608 #undef STRUCT_SPECIAL
4609
4610 /* we patch the ioctl size if necessary. We rely on the fact that
4611 no ioctl has all the bits at '1' in the size field */
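/* Hypothetical example: an entry whose target_cmd was declared with a
   placeholder size of TARGET_IOC_SIZEMASK gets that field replaced by
   thunk_type_size() of the pointed-to structure, leaving the rest of
   the command word (direction, type, number) untouched. */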
4612 ie = ioctl_entries;
4613 while (ie->target_cmd != 0) {
4614 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4615 TARGET_IOC_SIZEMASK) {
4616 arg_type = ie->arg_type;
4617 if (arg_type[0] != TYPE_PTR) {
4618 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4619 ie->target_cmd);
4620 exit(1);
4621 }
4622 arg_type++;
4623 size = thunk_type_size(arg_type, 0);
4624 ie->target_cmd = (ie->target_cmd &
4625 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4626 (size << TARGET_IOC_SIZESHIFT);
4627 }
4628
4629 /* Build target_to_host_errno_table[] table from
4630 * host_to_target_errno_table[]. */
4631 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4632 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4633
4634 /* automatic consistency check if same arch */
4635 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4636 (defined(__x86_64__) && defined(TARGET_X86_64))
4637 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4638 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4639 ie->name, ie->target_cmd, ie->host_cmd);
4640 }
4641 #endif
4642 ie++;
4643 }
4644 }
4645
4646 #if TARGET_ABI_BITS == 32
4647 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4648 {
4649 #ifdef TARGET_WORDS_BIGENDIAN
4650 return ((uint64_t)word0 << 32) | word1;
4651 #else
4652 return ((uint64_t)word1 << 32) | word0;
4653 #endif
4654 }
4655 #else /* TARGET_ABI_BITS == 32 */
4656 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4657 {
4658 return word0;
4659 }
4660 #endif /* TARGET_ABI_BITS != 32 */
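/* Example, assuming a 32-bit target ABI: a 64-bit offset such as
   0x0000000100000000 arrives split across two syscall arguments. A
   big-endian target passes word0=0x00000001, word1=0x00000000, a
   little-endian target passes word0=0x00000000, word1=0x00000001,
   and target_offset64() reassembles the same 64-bit value either way. */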
4661
4662 #ifdef TARGET_NR_truncate64
4663 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4664 abi_long arg2,
4665 abi_long arg3,
4666 abi_long arg4)
4667 {
4668 if (regpairs_aligned(cpu_env)) {
4669 arg2 = arg3;
4670 arg3 = arg4;
4671 }
4672 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4673 }
4674 #endif
4675
4676 #ifdef TARGET_NR_ftruncate64
4677 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4678 abi_long arg2,
4679 abi_long arg3,
4680 abi_long arg4)
4681 {
4682 if (regpairs_aligned(cpu_env)) {
4683 arg2 = arg3;
4684 arg3 = arg4;
4685 }
4686 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4687 }
4688 #endif
4689
4690 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4691 abi_ulong target_addr)
4692 {
4693 struct target_timespec *target_ts;
4694
4695 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4696 return -TARGET_EFAULT;
4697 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4698 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4699 unlock_user_struct(target_ts, target_addr, 0);
4700 return 0;
4701 }
4702
4703 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4704 struct timespec *host_ts)
4705 {
4706 struct target_timespec *target_ts;
4707
4708 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4709 return -TARGET_EFAULT;
4710 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4711 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4712 unlock_user_struct(target_ts, target_addr, 1);
4713 return 0;
4714 }
4715
4716 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4717 static inline abi_long host_to_target_stat64(void *cpu_env,
4718 abi_ulong target_addr,
4719 struct stat *host_st)
4720 {
4721 #ifdef TARGET_ARM
4722 if (((CPUARMState *)cpu_env)->eabi) {
4723 struct target_eabi_stat64 *target_st;
4724
4725 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4726 return -TARGET_EFAULT;
4727 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4728 __put_user(host_st->st_dev, &target_st->st_dev);
4729 __put_user(host_st->st_ino, &target_st->st_ino);
4730 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4731 __put_user(host_st->st_ino, &target_st->__st_ino);
4732 #endif
4733 __put_user(host_st->st_mode, &target_st->st_mode);
4734 __put_user(host_st->st_nlink, &target_st->st_nlink);
4735 __put_user(host_st->st_uid, &target_st->st_uid);
4736 __put_user(host_st->st_gid, &target_st->st_gid);
4737 __put_user(host_st->st_rdev, &target_st->st_rdev);
4738 __put_user(host_st->st_size, &target_st->st_size);
4739 __put_user(host_st->st_blksize, &target_st->st_blksize);
4740 __put_user(host_st->st_blocks, &target_st->st_blocks);
4741 __put_user(host_st->st_atime, &target_st->target_st_atime);
4742 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4743 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4744 unlock_user_struct(target_st, target_addr, 1);
4745 } else
4746 #endif
4747 {
4748 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4749 struct target_stat *target_st;
4750 #else
4751 struct target_stat64 *target_st;
4752 #endif
4753
4754 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4755 return -TARGET_EFAULT;
4756 memset(target_st, 0, sizeof(*target_st));
4757 __put_user(host_st->st_dev, &target_st->st_dev);
4758 __put_user(host_st->st_ino, &target_st->st_ino);
4759 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4760 __put_user(host_st->st_ino, &target_st->__st_ino);
4761 #endif
4762 __put_user(host_st->st_mode, &target_st->st_mode);
4763 __put_user(host_st->st_nlink, &target_st->st_nlink);
4764 __put_user(host_st->st_uid, &target_st->st_uid);
4765 __put_user(host_st->st_gid, &target_st->st_gid);
4766 __put_user(host_st->st_rdev, &target_st->st_rdev);
4767 /* XXX: better use of kernel struct */
4768 __put_user(host_st->st_size, &target_st->st_size);
4769 __put_user(host_st->st_blksize, &target_st->st_blksize);
4770 __put_user(host_st->st_blocks, &target_st->st_blocks);
4771 __put_user(host_st->st_atime, &target_st->target_st_atime);
4772 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4773 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4774 unlock_user_struct(target_st, target_addr, 1);
4775 }
4776
4777 return 0;
4778 }
4779 #endif
4780
4781 #if defined(CONFIG_USE_NPTL)
4782 /* ??? Using host futex calls even when target atomic operations
4783 are not really atomic probably breaks things. However, implementing
4784 futexes locally would make futexes shared between multiple processes
4785 tricky. Then again, such shared futexes are probably useless anyway
4786 because guest atomic operations won't work either. */
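/* Sketch of the mapping done below: a guest
   futex(uaddr, FUTEX_WAIT, val, timeout) becomes a host
   sys_futex(g2h(uaddr), op, tswap32(val), &ts, NULL, 0) once the
   target timespec has been converted; the wake/requeue variants pass
   their arguments through, byte-swapping only val3 for
   FUTEX_CMP_REQUEUE. */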
4787 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4788 target_ulong uaddr2, int val3)
4789 {
4790 struct timespec ts, *pts;
4791 int base_op;
4792
4793 /* ??? We assume FUTEX_* constants are the same on both host
4794 and target. */
4795 #ifdef FUTEX_CMD_MASK
4796 base_op = op & FUTEX_CMD_MASK;
4797 #else
4798 base_op = op;
4799 #endif
4800 switch (base_op) {
4801 case FUTEX_WAIT:
4802 if (timeout) {
4803 pts = &ts;
4804 target_to_host_timespec(pts, timeout);
4805 } else {
4806 pts = NULL;
4807 }
4808 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4809 pts, NULL, 0));
4810 case FUTEX_WAKE:
4811 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4812 case FUTEX_FD:
4813 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4814 case FUTEX_REQUEUE:
4815 case FUTEX_CMP_REQUEUE:
4816 case FUTEX_WAKE_OP:
4817 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4818 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4819 But the prototype takes a `struct timespec *'; insert casts
4820 to satisfy the compiler. We do not need to tswap TIMEOUT
4821 since it's not compared to guest memory. */
4822 pts = (struct timespec *)(uintptr_t) timeout;
4823 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4824 g2h(uaddr2),
4825 (base_op == FUTEX_CMP_REQUEUE
4826 ? tswap32(val3)
4827 : val3)));
4828 default:
4829 return -TARGET_ENOSYS;
4830 }
4831 }
4832 #endif
4833
4834 /* Map host to target signal numbers for the wait family of syscalls.
4835 Assume all other status bits are the same. */
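/* For example, a child stopped by a signal is reported by the host as
   (stop_signal << 8) | 0x7f; only the signal number is remapped to the
   target's numbering, the 0x7f marker and the remaining bits are kept
   as-is. */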
4836 static int host_to_target_waitstatus(int status)
4837 {
4838 if (WIFSIGNALED(status)) {
4839 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4840 }
4841 if (WIFSTOPPED(status)) {
4842 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4843 | (status & 0xff);
4844 }
4845 return status;
4846 }
4847
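/* Parse the kernel release string into a packed integer, one byte per
   component: for example "2.6.32" becomes (2 << 16) | (6 << 8) | 32,
   i.e. 0x020620. Components beyond the third and any non-numeric
   suffix (such as "-generic") are ignored. */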
4848 int get_osversion(void)
4849 {
4850 static int osversion;
4851 struct new_utsname buf;
4852 const char *s;
4853 int i, n, tmp;
4854 if (osversion)
4855 return osversion;
4856 if (qemu_uname_release && *qemu_uname_release) {
4857 s = qemu_uname_release;
4858 } else {
4859 if (sys_uname(&buf))
4860 return 0;
4861 s = buf.release;
4862 }
4863 tmp = 0;
4864 for (i = 0; i < 3; i++) {
4865 n = 0;
4866 while (*s >= '0' && *s <= '9') {
4867 n *= 10;
4868 n += *s - '0';
4869 s++;
4870 }
4871 tmp = (tmp << 8) + n;
4872 if (*s == '.')
4873 s++;
4874 }
4875 osversion = tmp;
4876 return osversion;
4877 }
4878
4879
4880 static int open_self_maps(void *cpu_env, int fd)
4881 {
4882 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4883 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4884 #endif
4885 FILE *fp;
4886 char *line = NULL;
4887 size_t len = 0;
4888 ssize_t read;
4889
4890 fp = fopen("/proc/self/maps", "r");
4891 if (fp == NULL) {
4892 return -EACCES;
4893 }
4894
4895 while ((read = getline(&line, &len, fp)) != -1) {
4896 int fields, dev_maj, dev_min, inode;
4897 uint64_t min, max, offset;
4898 char flag_r, flag_w, flag_x, flag_p;
4899 char path[513] = ""; /* room for the 512 chars scanned by %512s plus NUL */
4900 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4901 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4902 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4903
4904 if ((fields < 10) || (fields > 11)) {
4905 continue;
4906 }
4907 if (!strncmp(path, "[stack]", 7)) {
4908 continue;
4909 }
4910 if (h2g_valid(min) && h2g_valid(max)) {
4911 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
4912 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
4913 h2g(min), h2g(max), flag_r, flag_w,
4914 flag_x, flag_p, offset, dev_maj, dev_min, inode,
4915 path[0] ? " " : "", path);
4916 }
4917 }
4918
4919 free(line);
4920 fclose(fp);
4921
4922 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4923 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4924 (unsigned long long)ts->info->stack_limit,
4925 (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1))
4926 & TARGET_PAGE_MASK,
4927 (unsigned long long)0);
4928 #endif
4929
4930 return 0;
4931 }
4932
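/* Emulate /proc/self/stat with just enough content for typical guests
   (a simplification rather than a full implementation): of the 44
   space-separated fields written below, only pid (field 1), comm
   (field 2) and startstack (field 28) carry real values; every other
   field is reported as 0. */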
4933 static int open_self_stat(void *cpu_env, int fd)
4934 {
4935 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4936 abi_ulong start_stack = ts->info->start_stack;
4937 int i;
4938
4939 for (i = 0; i < 44; i++) {
4940 char buf[128];
4941 int len;
4942 uint64_t val = 0;
4943
4944 if (i == 0) {
4945 /* pid */
4946 val = getpid();
4947 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4948 } else if (i == 1) {
4949 /* app name */
4950 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
4951 } else if (i == 27) {
4952 /* stack bottom */
4953 val = start_stack;
4954 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4955 } else {
4956 /* all other fields are not emulated; report 0 */
4957 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
4958 }
4959
4960 len = strlen(buf);
4961 if (write(fd, buf, len) != len) {
4962 return -1;
4963 }
4964 }
4965
4966 return 0;
4967 }
4968
4969 static int open_self_auxv(void *cpu_env, int fd)
4970 {
4971 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4972 abi_ulong auxv = ts->info->saved_auxv;
4973 abi_ulong len = ts->info->auxv_len;
4974 char *ptr;
4975
4976 /*
4977 * The auxiliary vector is stored on the target process stack.
4978 * Read the whole auxv vector and copy it to the file.
4979 */
4980 ptr = lock_user(VERIFY_READ, auxv, len, 0);
4981 if (ptr != NULL) {
4982 while (len > 0) {
4983 ssize_t r;
4984 r = write(fd, ptr, len);
4985 if (r <= 0) {
4986 break;
4987 }
4988 len -= r;
4989 ptr += r;
4990 }
4991 lseek(fd, 0, SEEK_SET);
4992 unlock_user(ptr, auxv, len);
4993 }
4994
4995 return 0;
4996 }
4997
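/* Open a path on behalf of the guest, intercepting the few /proc/self
   files that must describe the emulated process rather than the QEMU
   host process. A matching entry is materialized into an unlinked
   temporary file so the guest receives an ordinary seekable fd;
   anything else falls through to the host open(). */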
4998 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
4999 {
5000 struct fake_open {
5001 const char *filename;
5002 int (*fill)(void *cpu_env, int fd);
5003 };
5004 const struct fake_open *fake_open;
5005 static const struct fake_open fakes[] = {
5006 { "/proc/self/maps", open_self_maps },
5007 { "/proc/self/stat", open_self_stat },
5008 { "/proc/self/auxv", open_self_auxv },
5009 { NULL, NULL }
5010 };
5011
5012 for (fake_open = fakes; fake_open->filename; fake_open++) {
5013 if (!strncmp(pathname, fake_open->filename,
5014 strlen(fake_open->filename))) {
5015 break;
5016 }
5017 }
5018
5019 if (fake_open->filename) {
5020 const char *tmpdir;
5021 char filename[PATH_MAX];
5022 int fd, r;
5023
5024 /* create temporary file to map stat to */
5025 tmpdir = getenv("TMPDIR");
5026 if (!tmpdir)
5027 tmpdir = "/tmp";
5028 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5029 fd = mkstemp(filename);
5030 if (fd < 0) {
5031 return fd;
5032 }
5033 unlink(filename);
5034
5035 if ((r = fake_open->fill(cpu_env, fd))) {
5036 close(fd);
5037 return r;
5038 }
5039 lseek(fd, 0, SEEK_SET);
5040
5041 return fd;
5042 }
5043
5044 return get_errno(open(path(pathname), flags, mode));
5045 }
5046
5047 /* do_syscall() should always have a single exit point at the end so
5048 that actions, such as logging of syscall results, can be performed.
5049 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5050 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5051 abi_long arg2, abi_long arg3, abi_long arg4,
5052 abi_long arg5, abi_long arg6, abi_long arg7,
5053 abi_long arg8)
5054 {
5055 abi_long ret;
5056 struct stat st;
5057 struct statfs stfs;
5058 void *p;
5059
5060 #ifdef DEBUG
5061 gemu_log("syscall %d", num);
5062 #endif
5063 if(do_strace)
5064 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5065
5066 switch(num) {
5067 case TARGET_NR_exit:
5068 #ifdef CONFIG_USE_NPTL
5069 /* In old applications this may be used to implement _exit(2).
5070 However, in threaded applications it is used for thread termination,
5071 and _exit_group is used for application termination.
5072 Do thread termination if we have more than one thread. */
5073 /* FIXME: This probably breaks if a signal arrives. We should probably
5074 be disabling signals. */
5075 if (first_cpu->next_cpu) {
5076 TaskState *ts;
5077 CPUArchState **lastp;
5078 CPUArchState *p;
5079
5080 cpu_list_lock();
5081 lastp = &first_cpu;
5082 p = first_cpu;
5083 while (p && p != (CPUArchState *)cpu_env) {
5084 lastp = &p->next_cpu;
5085 p = p->next_cpu;
5086 }
5087 /* If we didn't find the CPU for this thread then something is
5088 horribly wrong. */
5089 if (!p)
5090 abort();
5091 /* Remove the CPU from the list. */
5092 *lastp = p->next_cpu;
5093 cpu_list_unlock();
5094 ts = ((CPUArchState *)cpu_env)->opaque;
5095 if (ts->child_tidptr) {
5096 put_user_u32(0, ts->child_tidptr);
5097 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5098 NULL, NULL, 0);
5099 }
5100 thread_env = NULL;
5101 object_delete(OBJECT(ENV_GET_CPU(cpu_env)));
5102 g_free(ts);
5103 pthread_exit(NULL);
5104 }
5105 #endif
5106 #ifdef TARGET_GPROF
5107 _mcleanup();
5108 #endif
5109 gdb_exit(cpu_env, arg1);
5110 _exit(arg1);
5111 ret = 0; /* avoid warning */
5112 break;
5113 case TARGET_NR_read:
5114 if (arg3 == 0)
5115 ret = 0;
5116 else {
5117 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5118 goto efault;
5119 ret = get_errno(read(arg1, p, arg3));
5120 unlock_user(p, arg2, ret);
5121 }
5122 break;
5123 case TARGET_NR_write:
5124 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5125 goto efault;
5126 ret = get_errno(write(arg1, p, arg3));
5127 unlock_user(p, arg2, 0);
5128 break;
5129 case TARGET_NR_open:
5130 if (!(p = lock_user_string(arg1)))
5131 goto efault;
5132 ret = get_errno(do_open(cpu_env, p,
5133 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5134 arg3));
5135 unlock_user(p, arg1, 0);
5136 break;
5137 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5138 case TARGET_NR_openat:
5139 if (!(p = lock_user_string(arg2)))
5140 goto efault;
5141 ret = get_errno(sys_openat(arg1,
5142 path(p),
5143 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5144 arg4));
5145 unlock_user(p, arg2, 0);
5146 break;
5147 #endif
5148 case TARGET_NR_close:
5149 ret = get_errno(close(arg1));
5150 break;
5151 case TARGET_NR_brk:
5152 ret = do_brk(arg1);
5153 break;
5154 case TARGET_NR_fork:
5155 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5156 break;
5157 #ifdef TARGET_NR_waitpid
5158 case TARGET_NR_waitpid:
5159 {
5160 int status;
5161 ret = get_errno(waitpid(arg1, &status, arg3));
5162 if (!is_error(ret) && arg2 && ret
5163 && put_user_s32(host_to_target_waitstatus(status), arg2))
5164 goto efault;
5165 }
5166 break;
5167 #endif
5168 #ifdef TARGET_NR_waitid
5169 case TARGET_NR_waitid:
5170 {
5171 siginfo_t info;
5172 info.si_pid = 0;
5173 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5174 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5175 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5176 goto efault;
5177 host_to_target_siginfo(p, &info);
5178 unlock_user(p, arg3, sizeof(target_siginfo_t));
5179 }
5180 }
5181 break;
5182 #endif
5183 #ifdef TARGET_NR_creat /* not on alpha */
5184 case TARGET_NR_creat:
5185 if (!(p = lock_user_string(arg1)))
5186 goto efault;
5187 ret = get_errno(creat(p, arg2));
5188 unlock_user(p, arg1, 0);
5189 break;
5190 #endif
5191 case TARGET_NR_link:
5192 {
5193 void * p2;
5194 p = lock_user_string(arg1);
5195 p2 = lock_user_string(arg2);
5196 if (!p || !p2)
5197 ret = -TARGET_EFAULT;
5198 else
5199 ret = get_errno(link(p, p2));
5200 unlock_user(p2, arg2, 0);
5201 unlock_user(p, arg1, 0);
5202 }
5203 break;
5204 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5205 case TARGET_NR_linkat:
5206 {
5207 void * p2 = NULL;
5208 if (!arg2 || !arg4)
5209 goto efault;
5210 p = lock_user_string(arg2);
5211 p2 = lock_user_string(arg4);
5212 if (!p || !p2)
5213 ret = -TARGET_EFAULT;
5214 else
5215 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
5216 unlock_user(p, arg2, 0);
5217 unlock_user(p2, arg4, 0);
5218 }
5219 break;
5220 #endif
5221 case TARGET_NR_unlink:
5222 if (!(p = lock_user_string(arg1)))
5223 goto efault;
5224 ret = get_errno(unlink(p));
5225 unlock_user(p, arg1, 0);
5226 break;
5227 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5228 case TARGET_NR_unlinkat:
5229 if (!(p = lock_user_string(arg2)))
5230 goto efault;
5231 ret = get_errno(sys_unlinkat(arg1, p, arg3));
5232 unlock_user(p, arg2, 0);
5233 break;
5234 #endif
5235 case TARGET_NR_execve:
5236 {
5237 char **argp, **envp;
5238 int argc, envc;
5239 abi_ulong gp;
5240 abi_ulong guest_argp;
5241 abi_ulong guest_envp;
5242 abi_ulong addr;
5243 char **q;
5244 int total_size = 0;
5245
5246 argc = 0;
5247 guest_argp = arg2;
5248 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5249 if (get_user_ual(addr, gp))
5250 goto efault;
5251 if (!addr)
5252 break;
5253 argc++;
5254 }
5255 envc = 0;
5256 guest_envp = arg3;
5257 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5258 if (get_user_ual(addr, gp))
5259 goto efault;
5260 if (!addr)
5261 break;
5262 envc++;
5263 }
5264
5265 argp = alloca((argc + 1) * sizeof(void *));
5266 envp = alloca((envc + 1) * sizeof(void *));
5267
5268 for (gp = guest_argp, q = argp; gp;
5269 gp += sizeof(abi_ulong), q++) {
5270 if (get_user_ual(addr, gp))
5271 goto execve_efault;
5272 if (!addr)
5273 break;
5274 if (!(*q = lock_user_string(addr)))
5275 goto execve_efault;
5276 total_size += strlen(*q) + 1;
5277 }
5278 *q = NULL;
5279
5280 for (gp = guest_envp, q = envp; gp;
5281 gp += sizeof(abi_ulong), q++) {
5282 if (get_user_ual(addr, gp))
5283 goto execve_efault;
5284 if (!addr)
5285 break;
5286 if (!(*q = lock_user_string(addr)))
5287 goto execve_efault;
5288 total_size += strlen(*q) + 1;
5289 }
5290 *q = NULL;
5291
5292 /* This case will not be caught by the host's execve() if its
5293 page size is bigger than the target's. */
5294 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5295 ret = -TARGET_E2BIG;
5296 goto execve_end;
5297 }
5298 if (!(p = lock_user_string(arg1)))
5299 goto execve_efault;
5300 ret = get_errno(execve(p, argp, envp));
5301 unlock_user(p, arg1, 0);
5302
5303 goto execve_end;
5304
5305 execve_efault:
5306 ret = -TARGET_EFAULT;
5307
5308 execve_end:
5309 for (gp = guest_argp, q = argp; *q;
5310 gp += sizeof(abi_ulong), q++) {
5311 if (get_user_ual(addr, gp)
5312 || !addr)
5313 break;
5314 unlock_user(*q, addr, 0);
5315 }
5316 for (gp = guest_envp, q = envp; *q;
5317 gp += sizeof(abi_ulong), q++) {
5318 if (get_user_ual(addr, gp)
5319 || !addr)
5320 break;
5321 unlock_user(*q, addr, 0);
5322 }
5323 }
5324 break;
5325 case TARGET_NR_chdir:
5326 if (!(p = lock_user_string(arg1)))
5327 goto efault;
5328 ret = get_errno(chdir(p));
5329 unlock_user(p, arg1, 0);
5330 break;
5331 #ifdef TARGET_NR_time
5332 case TARGET_NR_time:
5333 {
5334 time_t host_time;
5335 ret = get_errno(time(&host_time));
5336 if (!is_error(ret)
5337 && arg1
5338 && put_user_sal(host_time, arg1))
5339 goto efault;
5340 }
5341 break;
5342 #endif
5343 case TARGET_NR_mknod:
5344 if (!(p = lock_user_string(arg1)))
5345 goto efault;
5346 ret = get_errno(mknod(p, arg2, arg3));
5347 unlock_user(p, arg1, 0);
5348 break;
5349 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5350 case TARGET_NR_mknodat:
5351 if (!(p = lock_user_string(arg2)))
5352 goto efault;
5353 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5354 unlock_user(p, arg2, 0);
5355 break;
5356 #endif
5357 case TARGET_NR_chmod:
5358 if (!(p = lock_user_string(arg1)))
5359 goto efault;
5360 ret = get_errno(chmod(p, arg2));
5361 unlock_user(p, arg1, 0);
5362 break;
5363 #ifdef TARGET_NR_break
5364 case TARGET_NR_break:
5365 goto unimplemented;
5366 #endif
5367 #ifdef TARGET_NR_oldstat
5368 case TARGET_NR_oldstat:
5369 goto unimplemented;
5370 #endif
5371 case TARGET_NR_lseek:
5372 ret = get_errno(lseek(arg1, arg2, arg3));
5373 break;
5374 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5375 /* Alpha specific */
5376 case TARGET_NR_getxpid:
5377 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5378 ret = get_errno(getpid());
5379 break;
5380 #endif
5381 #ifdef TARGET_NR_getpid
5382 case TARGET_NR_getpid:
5383 ret = get_errno(getpid());
5384 break;
5385 #endif
5386 case TARGET_NR_mount:
5387 {
5388 /* need to look at the data field */
5389 void *p2, *p3;
5390 p = lock_user_string(arg1);
5391 p2 = lock_user_string(arg2);
5392 p3 = lock_user_string(arg3);
5393 if (!p || !p2 || !p3)
5394 ret = -TARGET_EFAULT;
5395 else {
5396 /* FIXME - arg5 should be locked, but it isn't clear how to
5397 * do that since it's not guaranteed to be a NULL-terminated
5398 * string.
5399 */
5400 if ( ! arg5 )
5401 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5402 else
5403 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5404 }
5405 unlock_user(p, arg1, 0);
5406 unlock_user(p2, arg2, 0);
5407 unlock_user(p3, arg3, 0);
5408 break;
5409 }
5410 #ifdef TARGET_NR_umount
5411 case TARGET_NR_umount:
5412 if (!(p = lock_user_string(arg1)))
5413 goto efault;
5414 ret = get_errno(umount(p));
5415 unlock_user(p, arg1, 0);
5416 break;
5417 #endif
5418 #ifdef TARGET_NR_stime /* not on alpha */
5419 case TARGET_NR_stime:
5420 {
5421 time_t host_time;
5422 if (get_user_sal(host_time, arg1))
5423 goto efault;
5424 ret = get_errno(stime(&host_time));
5425 }
5426 break;
5427 #endif
5428 case TARGET_NR_ptrace:
5429 goto unimplemented;
5430 #ifdef TARGET_NR_alarm /* not on alpha */
5431 case TARGET_NR_alarm:
5432 ret = alarm(arg1);
5433 break;
5434 #endif
5435 #ifdef TARGET_NR_oldfstat
5436 case TARGET_NR_oldfstat:
5437 goto unimplemented;
5438 #endif
5439 #ifdef TARGET_NR_pause /* not on alpha */
5440 case TARGET_NR_pause:
5441 ret = get_errno(pause());
5442 break;
5443 #endif
5444 #ifdef TARGET_NR_utime
5445 case TARGET_NR_utime:
5446 {
5447 struct utimbuf tbuf, *host_tbuf;
5448 struct target_utimbuf *target_tbuf;
5449 if (arg2) {
5450 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5451 goto efault;
5452 tbuf.actime = tswapal(target_tbuf->actime);
5453 tbuf.modtime = tswapal(target_tbuf->modtime);
5454 unlock_user_struct(target_tbuf, arg2, 0);
5455 host_tbuf = &tbuf;
5456 } else {
5457 host_tbuf = NULL;
5458 }
5459 if (!(p = lock_user_string(arg1)))
5460 goto efault;
5461 ret = get_errno(utime(p, host_tbuf));
5462 unlock_user(p, arg1, 0);
5463 }
5464 break;
5465 #endif
5466 case TARGET_NR_utimes:
5467 {
5468 struct timeval *tvp, tv[2];
5469 if (arg2) {
5470 if (copy_from_user_timeval(&tv[0], arg2)
5471 || copy_from_user_timeval(&tv[1],
5472 arg2 + sizeof(struct target_timeval)))
5473 goto efault;
5474 tvp = tv;
5475 } else {
5476 tvp = NULL;
5477 }
5478 if (!(p = lock_user_string(arg1)))
5479 goto efault;
5480 ret = get_errno(utimes(p, tvp));
5481 unlock_user(p, arg1, 0);
5482 }
5483 break;
5484 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5485 case TARGET_NR_futimesat:
5486 {
5487 struct timeval *tvp, tv[2];
5488 if (arg3) {
5489 if (copy_from_user_timeval(&tv[0], arg3)
5490 || copy_from_user_timeval(&tv[1],
5491 arg3 + sizeof(struct target_timeval)))
5492 goto efault;
5493 tvp = tv;
5494 } else {
5495 tvp = NULL;
5496 }
5497 if (!(p = lock_user_string(arg2)))
5498 goto efault;
5499 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5500 unlock_user(p, arg2, 0);
5501 }
5502 break;
5503 #endif
5504 #ifdef TARGET_NR_stty
5505 case TARGET_NR_stty:
5506 goto unimplemented;
5507 #endif
5508 #ifdef TARGET_NR_gtty
5509 case TARGET_NR_gtty:
5510 goto unimplemented;
5511 #endif
5512 case TARGET_NR_access:
5513 if (!(p = lock_user_string(arg1)))
5514 goto efault;
5515 ret = get_errno(access(path(p), arg2));
5516 unlock_user(p, arg1, 0);
5517 break;
5518 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5519 case TARGET_NR_faccessat:
5520 if (!(p = lock_user_string(arg2)))
5521 goto efault;
5522 ret = get_errno(sys_faccessat(arg1, p, arg3));
5523 unlock_user(p, arg2, 0);
5524 break;
5525 #endif
5526 #ifdef TARGET_NR_nice /* not on alpha */
5527 case TARGET_NR_nice:
5528 ret = get_errno(nice(arg1));
5529 break;
5530 #endif
5531 #ifdef TARGET_NR_ftime
5532 case TARGET_NR_ftime:
5533 goto unimplemented;
5534 #endif
5535 case TARGET_NR_sync:
5536 sync();
5537 ret = 0;
5538 break;
5539 case TARGET_NR_kill:
5540 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5541 break;
5542 case TARGET_NR_rename:
5543 {
5544 void *p2;
5545 p = lock_user_string(arg1);
5546 p2 = lock_user_string(arg2);
5547 if (!p || !p2)
5548 ret = -TARGET_EFAULT;
5549 else
5550 ret = get_errno(rename(p, p2));
5551 unlock_user(p2, arg2, 0);
5552 unlock_user(p, arg1, 0);
5553 }
5554 break;
5555 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5556 case TARGET_NR_renameat:
5557 {
5558 void *p2;
5559 p = lock_user_string(arg2);
5560 p2 = lock_user_string(arg4);
5561 if (!p || !p2)
5562 ret = -TARGET_EFAULT;
5563 else
5564 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5565 unlock_user(p2, arg4, 0);
5566 unlock_user(p, arg2, 0);
5567 }
5568 break;
5569 #endif
5570 case TARGET_NR_mkdir:
5571 if (!(p = lock_user_string(arg1)))
5572 goto efault;
5573 ret = get_errno(mkdir(p, arg2));
5574 unlock_user(p, arg1, 0);
5575 break;
5576 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5577 case TARGET_NR_mkdirat:
5578 if (!(p = lock_user_string(arg2)))
5579 goto efault;
5580 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5581 unlock_user(p, arg2, 0);
5582 break;
5583 #endif
5584 case TARGET_NR_rmdir:
5585 if (!(p = lock_user_string(arg1)))
5586 goto efault;
5587 ret = get_errno(rmdir(p));
5588 unlock_user(p, arg1, 0);
5589 break;
5590 case TARGET_NR_dup:
5591 ret = get_errno(dup(arg1));
5592 break;
5593 case TARGET_NR_pipe:
5594 ret = do_pipe(cpu_env, arg1, 0, 0);
5595 break;
5596 #ifdef TARGET_NR_pipe2
5597 case TARGET_NR_pipe2:
5598 ret = do_pipe(cpu_env, arg1, arg2, 1);
5599 break;
5600 #endif
5601 case TARGET_NR_times:
5602 {
5603 struct target_tms *tmsp;
5604 struct tms tms;
5605 ret = get_errno(times(&tms));
5606 if (arg1) {
5607 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5608 if (!tmsp)
5609 goto efault;
5610 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5611 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5612 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5613 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5614 }
5615 if (!is_error(ret))
5616 ret = host_to_target_clock_t(ret);
5617 }
5618 break;
5619 #ifdef TARGET_NR_prof
5620 case TARGET_NR_prof:
5621 goto unimplemented;
5622 #endif
5623 #ifdef TARGET_NR_signal
5624 case TARGET_NR_signal:
5625 goto unimplemented;
5626 #endif
5627 case TARGET_NR_acct:
5628 if (arg1 == 0) {
5629 ret = get_errno(acct(NULL));
5630 } else {
5631 if (!(p = lock_user_string(arg1)))
5632 goto efault;
5633 ret = get_errno(acct(path(p)));
5634 unlock_user(p, arg1, 0);
5635 }
5636 break;
5637 #ifdef TARGET_NR_umount2 /* not on alpha */
5638 case TARGET_NR_umount2:
5639 if (!(p = lock_user_string(arg1)))
5640 goto efault;
5641 ret = get_errno(umount2(p, arg2));
5642 unlock_user(p, arg1, 0);
5643 break;
5644 #endif
5645 #ifdef TARGET_NR_lock
5646 case TARGET_NR_lock:
5647 goto unimplemented;
5648 #endif
5649 case TARGET_NR_ioctl:
5650 ret = do_ioctl(arg1, arg2, arg3);
5651 break;
5652 case TARGET_NR_fcntl:
5653 ret = do_fcntl(arg1, arg2, arg3);
5654 break;
5655 #ifdef TARGET_NR_mpx
5656 case TARGET_NR_mpx:
5657 goto unimplemented;
5658 #endif
5659 case TARGET_NR_setpgid:
5660 ret = get_errno(setpgid(arg1, arg2));
5661 break;
5662 #ifdef TARGET_NR_ulimit
5663 case TARGET_NR_ulimit:
5664 goto unimplemented;
5665 #endif
5666 #ifdef TARGET_NR_oldolduname
5667 case TARGET_NR_oldolduname:
5668 goto unimplemented;
5669 #endif
5670 case TARGET_NR_umask:
5671 ret = get_errno(umask(arg1));
5672 break;
5673 case TARGET_NR_chroot:
5674 if (!(p = lock_user_string(arg1)))
5675 goto efault;
5676 ret = get_errno(chroot(p));
5677 unlock_user(p, arg1, 0);
5678 break;
5679 case TARGET_NR_ustat:
5680 goto unimplemented;
5681 case TARGET_NR_dup2:
5682 ret = get_errno(dup2(arg1, arg2));
5683 break;
5684 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5685 case TARGET_NR_dup3:
5686 ret = get_errno(dup3(arg1, arg2, arg3));
5687 break;
5688 #endif
5689 #ifdef TARGET_NR_getppid /* not on alpha */
5690 case TARGET_NR_getppid:
5691 ret = get_errno(getppid());
5692 break;
5693 #endif
5694 case TARGET_NR_getpgrp:
5695 ret = get_errno(getpgrp());
5696 break;
5697 case TARGET_NR_setsid:
5698 ret = get_errno(setsid());
5699 break;
5700 #ifdef TARGET_NR_sigaction
5701 case TARGET_NR_sigaction:
5702 {
5703 #if defined(TARGET_ALPHA)
5704 struct target_sigaction act, oact, *pact = 0;
5705 struct target_old_sigaction *old_act;
5706 if (arg2) {
5707 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5708 goto efault;
5709 act._sa_handler = old_act->_sa_handler;
5710 target_siginitset(&act.sa_mask, old_act->sa_mask);
5711 act.sa_flags = old_act->sa_flags;
5712 act.sa_restorer = 0;
5713 unlock_user_struct(old_act, arg2, 0);
5714 pact = &act;
5715 }
5716 ret = get_errno(do_sigaction(arg1, pact, &oact));
5717 if (!is_error(ret) && arg3) {
5718 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5719 goto efault;
5720 old_act->_sa_handler = oact._sa_handler;
5721 old_act->sa_mask = oact.sa_mask.sig[0];
5722 old_act->sa_flags = oact.sa_flags;
5723 unlock_user_struct(old_act, arg3, 1);
5724 }
5725 #elif defined(TARGET_MIPS)
5726 struct target_sigaction act, oact, *pact, *old_act;
5727
5728 if (arg2) {
5729 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5730 goto efault;
5731 act._sa_handler = old_act->_sa_handler;
5732 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5733 act.sa_flags = old_act->sa_flags;
5734 unlock_user_struct(old_act, arg2, 0);
5735 pact = &act;
5736 } else {
5737 pact = NULL;
5738 }
5739
5740 ret = get_errno(do_sigaction(arg1, pact, &oact));
5741
5742 if (!is_error(ret) && arg3) {
5743 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5744 goto efault;
5745 old_act->_sa_handler = oact._sa_handler;
5746 old_act->sa_flags = oact.sa_flags;
5747 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5748 old_act->sa_mask.sig[1] = 0;
5749 old_act->sa_mask.sig[2] = 0;
5750 old_act->sa_mask.sig[3] = 0;
5751 unlock_user_struct(old_act, arg3, 1);
5752 }
5753 #else
5754 struct target_old_sigaction *old_act;
5755 struct target_sigaction act, oact, *pact;
5756 if (arg2) {
5757 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5758 goto efault;
5759 act._sa_handler = old_act->_sa_handler;
5760 target_siginitset(&act.sa_mask, old_act->sa_mask);
5761 act.sa_flags = old_act->sa_flags;
5762 act.sa_restorer = old_act->sa_restorer;
5763 unlock_user_struct(old_act, arg2, 0);
5764 pact = &act;
5765 } else {
5766 pact = NULL;
5767 }
5768 ret = get_errno(do_sigaction(arg1, pact, &oact));
5769 if (!is_error(ret) && arg3) {
5770 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5771 goto efault;
5772 old_act->_sa_handler = oact._sa_handler;
5773 old_act->sa_mask = oact.sa_mask.sig[0];
5774 old_act->sa_flags = oact.sa_flags;
5775 old_act->sa_restorer = oact.sa_restorer;
5776 unlock_user_struct(old_act, arg3, 1);
5777 }
5778 #endif
5779 }
5780 break;
5781 #endif
5782 case TARGET_NR_rt_sigaction:
5783 {
5784 #if defined(TARGET_ALPHA)
5785 struct target_sigaction act, oact, *pact = 0;
5786 struct target_rt_sigaction *rt_act;
5787 /* ??? arg4 == sizeof(sigset_t). */
5788 if (arg2) {
5789 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5790 goto efault;
5791 act._sa_handler = rt_act->_sa_handler;
5792 act.sa_mask = rt_act->sa_mask;
5793 act.sa_flags = rt_act->sa_flags;
5794 act.sa_restorer = arg5;
5795 unlock_user_struct(rt_act, arg2, 0);
5796 pact = &act;
5797 }
5798 ret = get_errno(do_sigaction(arg1, pact, &oact));
5799 if (!is_error(ret) && arg3) {
5800 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5801 goto efault;
5802 rt_act->_sa_handler = oact._sa_handler;
5803 rt_act->sa_mask = oact.sa_mask;
5804 rt_act->sa_flags = oact.sa_flags;
5805 unlock_user_struct(rt_act, arg3, 1);
5806 }
5807 #else
5808 struct target_sigaction *act;
5809 struct target_sigaction *oact;
5810
5811 if (arg2) {
5812 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5813 goto efault;
5814 } else
5815 act = NULL;
5816 if (arg3) {
5817 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5818 ret = -TARGET_EFAULT;
5819 goto rt_sigaction_fail;
5820 }
5821 } else
5822 oact = NULL;
5823 ret = get_errno(do_sigaction(arg1, act, oact));
5824 rt_sigaction_fail:
5825 if (act)
5826 unlock_user_struct(act, arg2, 0);
5827 if (oact)
5828 unlock_user_struct(oact, arg3, 1);
5829 #endif
5830 }
5831 break;
5832 #ifdef TARGET_NR_sgetmask /* not on alpha */
5833 case TARGET_NR_sgetmask:
5834 {
5835 sigset_t cur_set;
5836 abi_ulong target_set;
5837 sigprocmask(0, NULL, &cur_set);
5838 host_to_target_old_sigset(&target_set, &cur_set);
5839 ret = target_set;
5840 }
5841 break;
5842 #endif
5843 #ifdef TARGET_NR_ssetmask /* not on alpha */
5844 case TARGET_NR_ssetmask:
5845 {
5846 sigset_t set, oset, cur_set;
5847 abi_ulong target_set = arg1;
5848 sigprocmask(0, NULL, &cur_set);
5849 target_to_host_old_sigset(&set, &target_set);
5850 sigorset(&set, &set, &cur_set);
5851 sigprocmask(SIG_SETMASK, &set, &oset);
5852 host_to_target_old_sigset(&target_set, &oset);
5853 ret = target_set;
5854 }
5855 break;
5856 #endif
5857 #ifdef TARGET_NR_sigprocmask
5858 case TARGET_NR_sigprocmask:
5859 {
5860 #if defined(TARGET_ALPHA)
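/* On Alpha the old sigprocmask passes the mask by value in the second
   argument and returns the previous mask as the syscall result, so no
   guest memory is written here. */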
5861 sigset_t set, oldset;
5862 abi_ulong mask;
5863 int how;
5864
5865 switch (arg1) {
5866 case TARGET_SIG_BLOCK:
5867 how = SIG_BLOCK;
5868 break;
5869 case TARGET_SIG_UNBLOCK:
5870 how = SIG_UNBLOCK;
5871 break;
5872 case TARGET_SIG_SETMASK:
5873 how = SIG_SETMASK;
5874 break;
5875 default:
5876 ret = -TARGET_EINVAL;
5877 goto fail;
5878 }
5879 mask = arg2;
5880 target_to_host_old_sigset(&set, &mask);
5881
5882 ret = get_errno(sigprocmask(how, &set, &oldset));
5883
5884 if (!is_error(ret)) {
5885 host_to_target_old_sigset(&mask, &oldset);
5886 ret = mask;
5887 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5888 }
5889 #else
5890 sigset_t set, oldset, *set_ptr;
5891 int how;
5892
5893 if (arg2) {
5894 switch (arg1) {
5895 case TARGET_SIG_BLOCK:
5896 how = SIG_BLOCK;
5897 break;
5898 case TARGET_SIG_UNBLOCK:
5899 how = SIG_UNBLOCK;
5900 break;
5901 case TARGET_SIG_SETMASK:
5902 how = SIG_SETMASK;
5903 break;
5904 default:
5905 ret = -TARGET_EINVAL;
5906 goto fail;
5907 }
5908 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5909 goto efault;
5910 target_to_host_old_sigset(&set, p);
5911 unlock_user(p, arg2, 0);
5912 set_ptr = &set;
5913 } else {
5914 how = 0;
5915 set_ptr = NULL;
5916 }
5917 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5918 if (!is_error(ret) && arg3) {
5919 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5920 goto efault;
5921 host_to_target_old_sigset(p, &oldset);
5922 unlock_user(p, arg3, sizeof(target_sigset_t));
5923 }
5924 #endif
5925 }
5926 break;
5927 #endif
5928 case TARGET_NR_rt_sigprocmask:
5929 {
5930 int how = arg1;
5931 sigset_t set, oldset, *set_ptr;
5932
5933 if (arg2) {
5934 switch(how) {
5935 case TARGET_SIG_BLOCK:
5936 how = SIG_BLOCK;
5937 break;
5938 case TARGET_SIG_UNBLOCK:
5939 how = SIG_UNBLOCK;
5940 break;
5941 case TARGET_SIG_SETMASK:
5942 how = SIG_SETMASK;
5943 break;
5944 default:
5945 ret = -TARGET_EINVAL;
5946 goto fail;
5947 }
5948 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5949 goto efault;
5950 target_to_host_sigset(&set, p);
5951 unlock_user(p, arg2, 0);
5952 set_ptr = &set;
5953 } else {
5954 how = 0;
5955 set_ptr = NULL;
5956 }
5957 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5958 if (!is_error(ret) && arg3) {
5959 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5960 goto efault;
5961 host_to_target_sigset(p, &oldset);
5962 unlock_user(p, arg3, sizeof(target_sigset_t));
5963 }
5964 }
5965 break;
5966 #ifdef TARGET_NR_sigpending
5967 case TARGET_NR_sigpending:
5968 {
5969 sigset_t set;
5970 ret = get_errno(sigpending(&set));
5971 if (!is_error(ret)) {
5972 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5973 goto efault;
5974 host_to_target_old_sigset(p, &set);
5975 unlock_user(p, arg1, sizeof(target_sigset_t));
5976 }
5977 }
5978 break;
5979 #endif
5980 case TARGET_NR_rt_sigpending:
5981 {
5982 sigset_t set;
5983 ret = get_errno(sigpending(&set));
5984 if (!is_error(ret)) {
5985 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5986 goto efault;
5987 host_to_target_sigset(p, &set);
5988 unlock_user(p, arg1, sizeof(target_sigset_t));
5989 }
5990 }
5991 break;
5992 #ifdef TARGET_NR_sigsuspend
5993 case TARGET_NR_sigsuspend:
5994 {
5995 sigset_t set;
5996 #if defined(TARGET_ALPHA)
5997 abi_ulong mask = arg1;
5998 target_to_host_old_sigset(&set, &mask);
5999 #else
6000 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6001 goto efault;
6002 target_to_host_old_sigset(&set, p);
6003 unlock_user(p, arg1, 0);
6004 #endif
6005 ret = get_errno(sigsuspend(&set));
6006 }
6007 break;
6008 #endif
6009 case TARGET_NR_rt_sigsuspend:
6010 {
6011 sigset_t set;
6012 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6013 goto efault;
6014 target_to_host_sigset(&set, p);
6015 unlock_user(p, arg1, 0);
6016 ret = get_errno(sigsuspend(&set));
6017 }
6018 break;
6019 case TARGET_NR_rt_sigtimedwait:
6020 {
6021 sigset_t set;
6022 struct timespec uts, *puts;
6023 siginfo_t uinfo;
6024
6025 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6026 goto efault;
6027 target_to_host_sigset(&set, p);
6028 unlock_user(p, arg1, 0);
6029 if (arg3) {
6030 puts = &uts;
6031 target_to_host_timespec(puts, arg3);
6032 } else {
6033 puts = NULL;
6034 }
6035 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6036 if (!is_error(ret) && arg2) {
6037 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6038 goto efault;
6039 host_to_target_siginfo(p, &uinfo);
6040 unlock_user(p, arg2, sizeof(target_siginfo_t));
6041 }
6042 }
6043 break;
6044 case TARGET_NR_rt_sigqueueinfo:
6045 {
6046 siginfo_t uinfo;
6047 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6048 goto efault;
6049 target_to_host_siginfo(&uinfo, p);
6050 unlock_user(p, arg3, 0);
6051 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6052 }
6053 break;
6054 #ifdef TARGET_NR_sigreturn
6055 case TARGET_NR_sigreturn:
6056 /* NOTE: ret is eax, so no transcoding needs to be done */
6057 ret = do_sigreturn(cpu_env);
6058 break;
6059 #endif
6060 case TARGET_NR_rt_sigreturn:
6061 /* NOTE: ret is eax, so no transcoding needs to be done */
6062 ret = do_rt_sigreturn(cpu_env);
6063 break;
6064 case TARGET_NR_sethostname:
6065 if (!(p = lock_user_string(arg1)))
6066 goto efault;
6067 ret = get_errno(sethostname(p, arg2));
6068 unlock_user(p, arg1, 0);
6069 break;
6070 case TARGET_NR_setrlimit:
6071 {
6072 int resource = target_to_host_resource(arg1);
6073 struct target_rlimit *target_rlim;
6074 struct rlimit rlim;
6075 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6076 goto efault;
6077 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6078 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6079 unlock_user_struct(target_rlim, arg2, 0);
6080 ret = get_errno(setrlimit(resource, &rlim));
6081 }
6082 break;
6083 case TARGET_NR_getrlimit:
6084 {
6085 int resource = target_to_host_resource(arg1);
6086 struct target_rlimit *target_rlim;
6087 struct rlimit rlim;
6088
6089 ret = get_errno(getrlimit(resource, &rlim));
6090 if (!is_error(ret)) {
6091 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6092 goto efault;
6093 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6094 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6095 unlock_user_struct(target_rlim, arg2, 1);
6096 }
6097 }
6098 break;
6099 case TARGET_NR_getrusage:
6100 {
6101 struct rusage rusage;
6102 ret = get_errno(getrusage(arg1, &rusage));
6103 if (!is_error(ret)) {
6104 host_to_target_rusage(arg2, &rusage);
6105 }
6106 }
6107 break;
6108 case TARGET_NR_gettimeofday:
6109 {
6110 struct timeval tv;
6111 ret = get_errno(gettimeofday(&tv, NULL));
6112 if (!is_error(ret)) {
6113 if (copy_to_user_timeval(arg1, &tv))
6114 goto efault;
6115 }
6116 }
6117 break;
6118 case TARGET_NR_settimeofday:
6119 {
6120 struct timeval tv;
6121 if (copy_from_user_timeval(&tv, arg1))
6122 goto efault;
6123 ret = get_errno(settimeofday(&tv, NULL));
6124 }
6125 break;
6126 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
6127 case TARGET_NR_select:
6128 {
6129 struct target_sel_arg_struct *sel;
6130 abi_ulong inp, outp, exp, tvp;
6131 long nsel;
6132
6133 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6134 goto efault;
6135 nsel = tswapal(sel->n);
6136 inp = tswapal(sel->inp);
6137 outp = tswapal(sel->outp);
6138 exp = tswapal(sel->exp);
6139 tvp = tswapal(sel->tvp);
6140 unlock_user_struct(sel, arg1, 0);
6141 ret = do_select(nsel, inp, outp, exp, tvp);
6142 }
6143 break;
6144 #endif
6145 #ifdef TARGET_NR_pselect6
6146 case TARGET_NR_pselect6:
6147 {
6148 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6149 fd_set rfds, wfds, efds;
6150 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6151 struct timespec ts, *ts_ptr;
6152
6153 /*
6154 * The 6th arg is actually two args smashed together,
6155 * so we cannot use the C library.
6156 */
6157 sigset_t set;
6158 struct {
6159 sigset_t *set;
6160 size_t size;
6161 } sig, *sig_ptr;
6162
6163 abi_ulong arg_sigset, arg_sigsize, *arg7;
6164 target_sigset_t *target_sigset;
6165
6166 n = arg1;
6167 rfd_addr = arg2;
6168 wfd_addr = arg3;
6169 efd_addr = arg4;
6170 ts_addr = arg5;
6171
6172 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6173 if (ret) {
6174 goto fail;
6175 }
6176 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6177 if (ret) {
6178 goto fail;
6179 }
6180 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6181 if (ret) {
6182 goto fail;
6183 }
6184
6185 /*
6186 * This takes a timespec, and not a timeval, so we cannot
6187 * use the do_select() helper ...
6188 */
6189 if (ts_addr) {
6190 if (target_to_host_timespec(&ts, ts_addr)) {
6191 goto efault;
6192 }
6193 ts_ptr = &ts;
6194 } else {
6195 ts_ptr = NULL;
6196 }
6197
6198 /* Extract the two packed args for the sigset */
6199 if (arg6) {
6200 sig_ptr = &sig;
6201 sig.size = _NSIG / 8;
6202
6203 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6204 if (!arg7) {
6205 goto efault;
6206 }
6207 arg_sigset = tswapal(arg7[0]);
6208 arg_sigsize = tswapal(arg7[1]);
6209 unlock_user(arg7, arg6, 0);
6210
6211 if (arg_sigset) {
6212 sig.set = &set;
6213 if (arg_sigsize != sizeof(*target_sigset)) {
6214 /* Like the kernel, we enforce correct size sigsets */
6215 ret = -TARGET_EINVAL;
6216 goto fail;
6217 }
6218 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6219 sizeof(*target_sigset), 1);
6220 if (!target_sigset) {
6221 goto efault;
6222 }
6223 target_to_host_sigset(&set, target_sigset);
6224 unlock_user(target_sigset, arg_sigset, 0);
6225 } else {
6226 sig.set = NULL;
6227 }
6228 } else {
6229 sig_ptr = NULL;
6230 }
6231
6232 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6233 ts_ptr, sig_ptr));
6234
6235 if (!is_error(ret)) {
6236 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6237 goto efault;
6238 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6239 goto efault;
6240 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6241 goto efault;
6242
6243 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6244 goto efault;
6245 }
6246 }
6247 break;
6248 #endif
6249 case TARGET_NR_symlink:
6250 {
6251 void *p2;
6252 p = lock_user_string(arg1);
6253 p2 = lock_user_string(arg2);
6254 if (!p || !p2)
6255 ret = -TARGET_EFAULT;
6256 else
6257 ret = get_errno(symlink(p, p2));
6258 unlock_user(p2, arg2, 0);
6259 unlock_user(p, arg1, 0);
6260 }
6261 break;
6262 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6263 case TARGET_NR_symlinkat:
6264 {
6265 void *p2;
6266 p = lock_user_string(arg1);
6267 p2 = lock_user_string(arg3);
6268 if (!p || !p2)
6269 ret = -TARGET_EFAULT;
6270 else
6271 ret = get_errno(sys_symlinkat(p, arg2, p2));
6272 unlock_user(p2, arg3, 0);
6273 unlock_user(p, arg1, 0);
6274 }
6275 break;
6276 #endif
6277 #ifdef TARGET_NR_oldlstat
6278 case TARGET_NR_oldlstat:
6279 goto unimplemented;
6280 #endif
6281 case TARGET_NR_readlink:
6282 {
6283 void *p2, *temp;
6284 p = lock_user_string(arg1);
6285 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6286 if (!p || !p2)
6287 ret = -TARGET_EFAULT;
6288 else {
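/* readlink("/proc/self/exe") would report the qemu binary itself; hand back
   the path of the emulated executable (exec_path) instead. */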
6289 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
6290 char real[PATH_MAX];
6291 temp = realpath(exec_path,real);
6292 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
6293 snprintf((char *)p2, arg3, "%s", real);
6294 }
6295 else
6296 ret = get_errno(readlink(path(p), p2, arg3));
6297 }
6298 unlock_user(p2, arg2, ret);
6299 unlock_user(p, arg1, 0);
6300 }
6301 break;
6302 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6303 case TARGET_NR_readlinkat:
6304 {
6305 void *p2;
6306 p = lock_user_string(arg2);
6307 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6308 if (!p || !p2)
6309 ret = -TARGET_EFAULT;
6310 else
6311 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6312 unlock_user(p2, arg3, ret);
6313 unlock_user(p, arg2, 0);
6314 }
6315 break;
6316 #endif
6317 #ifdef TARGET_NR_uselib
6318 case TARGET_NR_uselib:
6319 goto unimplemented;
6320 #endif
6321 #ifdef TARGET_NR_swapon
6322 case TARGET_NR_swapon:
6323 if (!(p = lock_user_string(arg1)))
6324 goto efault;
6325 ret = get_errno(swapon(p, arg2));
6326 unlock_user(p, arg1, 0);
6327 break;
6328 #endif
6329 case TARGET_NR_reboot:
6330 if (!(p = lock_user_string(arg4)))
6331 goto efault;
6332 ret = reboot(arg1, arg2, arg3, p);
6333 unlock_user(p, arg4, 0);
6334 break;
6335 #ifdef TARGET_NR_readdir
6336 case TARGET_NR_readdir:
6337 goto unimplemented;
6338 #endif
6339 #ifdef TARGET_NR_mmap
6340 case TARGET_NR_mmap:
6341 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6342 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6343 || defined(TARGET_S390X)
6344 {
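/* On these targets the old mmap syscall takes a single pointer to a block
   of six arguments in guest memory instead of register arguments. */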
6345 abi_ulong *v;
6346 abi_ulong v1, v2, v3, v4, v5, v6;
6347 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6348 goto efault;
6349 v1 = tswapal(v[0]);
6350 v2 = tswapal(v[1]);
6351 v3 = tswapal(v[2]);
6352 v4 = tswapal(v[3]);
6353 v5 = tswapal(v[4]);
6354 v6 = tswapal(v[5]);
6355 unlock_user(v, arg1, 0);
6356 ret = get_errno(target_mmap(v1, v2, v3,
6357 target_to_host_bitmask(v4, mmap_flags_tbl),
6358 v5, v6));
6359 }
6360 #else
6361 ret = get_errno(target_mmap(arg1, arg2, arg3,
6362 target_to_host_bitmask(arg4, mmap_flags_tbl),
6363 arg5,
6364 arg6));
6365 #endif
6366 break;
6367 #endif
6368 #ifdef TARGET_NR_mmap2
6369 case TARGET_NR_mmap2:
6370 #ifndef MMAP_SHIFT
6371 #define MMAP_SHIFT 12
6372 #endif
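/* mmap2 passes the file offset in units of 2^MMAP_SHIFT bytes (4096 unless
   a target header overrides it), so scale it back to a byte offset. */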
6373 ret = get_errno(target_mmap(arg1, arg2, arg3,
6374 target_to_host_bitmask(arg4, mmap_flags_tbl),
6375 arg5,
6376 arg6 << MMAP_SHIFT));
6377 break;
6378 #endif
6379 case TARGET_NR_munmap:
6380 ret = get_errno(target_munmap(arg1, arg2));
6381 break;
6382 case TARGET_NR_mprotect:
6383 {
6384 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6385 /* Special hack to detect libc making the stack executable. */
6386 if ((arg3 & PROT_GROWSDOWN)
6387 && arg1 >= ts->info->stack_limit
6388 && arg1 <= ts->info->start_stack) {
6389 arg3 &= ~PROT_GROWSDOWN;
6390 arg2 = arg2 + arg1 - ts->info->stack_limit;
6391 arg1 = ts->info->stack_limit;
6392 }
6393 }
6394 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6395 break;
6396 #ifdef TARGET_NR_mremap
6397 case TARGET_NR_mremap:
6398 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6399 break;
6400 #endif
6401 /* ??? msync/mlock/munlock are broken for softmmu. */
6402 #ifdef TARGET_NR_msync
6403 case TARGET_NR_msync:
6404 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6405 break;
6406 #endif
6407 #ifdef TARGET_NR_mlock
6408 case TARGET_NR_mlock:
6409 ret = get_errno(mlock(g2h(arg1), arg2));
6410 break;
6411 #endif
6412 #ifdef TARGET_NR_munlock
6413 case TARGET_NR_munlock:
6414 ret = get_errno(munlock(g2h(arg1), arg2));
6415 break;
6416 #endif
6417 #ifdef TARGET_NR_mlockall
6418 case TARGET_NR_mlockall:
6419 ret = get_errno(mlockall(arg1));
6420 break;
6421 #endif
6422 #ifdef TARGET_NR_munlockall
6423 case TARGET_NR_munlockall:
6424 ret = get_errno(munlockall());
6425 break;
6426 #endif
6427 case TARGET_NR_truncate:
6428 if (!(p = lock_user_string(arg1)))
6429 goto efault;
6430 ret = get_errno(truncate(p, arg2));
6431 unlock_user(p, arg1, 0);
6432 break;
6433 case TARGET_NR_ftruncate:
6434 ret = get_errno(ftruncate(arg1, arg2));
6435 break;
6436 case TARGET_NR_fchmod:
6437 ret = get_errno(fchmod(arg1, arg2));
6438 break;
6439 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6440 case TARGET_NR_fchmodat:
6441 if (!(p = lock_user_string(arg2)))
6442 goto efault;
6443 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6444 unlock_user(p, arg2, 0);
6445 break;
6446 #endif
6447 case TARGET_NR_getpriority:
6448 /* libc does special remapping of the return value of
6449 * sys_getpriority() so it's just easiest to call
6450 * sys_getpriority() directly rather than through libc. */
6451 ret = get_errno(sys_getpriority(arg1, arg2));
6452 break;
6453 case TARGET_NR_setpriority:
6454 ret = get_errno(setpriority(arg1, arg2, arg3));
6455 break;
6456 #ifdef TARGET_NR_profil
6457 case TARGET_NR_profil:
6458 goto unimplemented;
6459 #endif
6460 case TARGET_NR_statfs:
6461 if (!(p = lock_user_string(arg1)))
6462 goto efault;
6463 ret = get_errno(statfs(path(p), &stfs));
6464 unlock_user(p, arg1, 0);
6465 convert_statfs:
6466 if (!is_error(ret)) {
6467 struct target_statfs *target_stfs;
6468
6469 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6470 goto efault;
6471 __put_user(stfs.f_type, &target_stfs->f_type);
6472 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6473 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6474 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6475 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6476 __put_user(stfs.f_files, &target_stfs->f_files);
6477 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6478 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6479 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6480 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6481 unlock_user_struct(target_stfs, arg2, 1);
6482 }
6483 break;
6484 case TARGET_NR_fstatfs:
6485 ret = get_errno(fstatfs(arg1, &stfs));
6486 goto convert_statfs;
6487 #ifdef TARGET_NR_statfs64
6488 case TARGET_NR_statfs64:
6489 if (!(p = lock_user_string(arg1)))
6490 goto efault;
6491 ret = get_errno(statfs(path(p), &stfs));
6492 unlock_user(p, arg1, 0);
6493 convert_statfs64:
6494 if (!is_error(ret)) {
6495 struct target_statfs64 *target_stfs;
6496
6497 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6498 goto efault;
6499 __put_user(stfs.f_type, &target_stfs->f_type);
6500 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6501 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6502 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6503 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6504 __put_user(stfs.f_files, &target_stfs->f_files);
6505 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6506 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6507 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6508 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6509 unlock_user_struct(target_stfs, arg3, 1);
6510 }
6511 break;
6512 case TARGET_NR_fstatfs64:
6513 ret = get_errno(fstatfs(arg1, &stfs));
6514 goto convert_statfs64;
6515 #endif
6516 #ifdef TARGET_NR_ioperm
6517 case TARGET_NR_ioperm:
6518 goto unimplemented;
6519 #endif
6520 #ifdef TARGET_NR_socketcall
6521 case TARGET_NR_socketcall:
6522 ret = do_socketcall(arg1, arg2);
6523 break;
6524 #endif
6525 #ifdef TARGET_NR_accept
6526 case TARGET_NR_accept:
6527 ret = do_accept(arg1, arg2, arg3);
6528 break;
6529 #endif
6530 #ifdef TARGET_NR_bind
6531 case TARGET_NR_bind:
6532 ret = do_bind(arg1, arg2, arg3);
6533 break;
6534 #endif
6535 #ifdef TARGET_NR_connect
6536 case TARGET_NR_connect:
6537 ret = do_connect(arg1, arg2, arg3);
6538 break;
6539 #endif
6540 #ifdef TARGET_NR_getpeername
6541 case TARGET_NR_getpeername:
6542 ret = do_getpeername(arg1, arg2, arg3);
6543 break;
6544 #endif
6545 #ifdef TARGET_NR_getsockname
6546 case TARGET_NR_getsockname:
6547 ret = do_getsockname(arg1, arg2, arg3);
6548 break;
6549 #endif
6550 #ifdef TARGET_NR_getsockopt
6551 case TARGET_NR_getsockopt:
6552 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6553 break;
6554 #endif
6555 #ifdef TARGET_NR_listen
6556 case TARGET_NR_listen:
6557 ret = get_errno(listen(arg1, arg2));
6558 break;
6559 #endif
6560 #ifdef TARGET_NR_recv
6561 case TARGET_NR_recv:
6562 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6563 break;
6564 #endif
6565 #ifdef TARGET_NR_recvfrom
6566 case TARGET_NR_recvfrom:
6567 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6568 break;
6569 #endif
6570 #ifdef TARGET_NR_recvmsg
6571 case TARGET_NR_recvmsg:
6572 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6573 break;
6574 #endif
6575 #ifdef TARGET_NR_send
6576 case TARGET_NR_send:
6577 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6578 break;
6579 #endif
6580 #ifdef TARGET_NR_sendmsg
6581 case TARGET_NR_sendmsg:
6582 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6583 break;
6584 #endif
6585 #ifdef TARGET_NR_sendto
6586 case TARGET_NR_sendto:
6587 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6588 break;
6589 #endif
6590 #ifdef TARGET_NR_shutdown
6591 case TARGET_NR_shutdown:
6592 ret = get_errno(shutdown(arg1, arg2));
6593 break;
6594 #endif
6595 #ifdef TARGET_NR_socket
6596 case TARGET_NR_socket:
6597 ret = do_socket(arg1, arg2, arg3);
6598 break;
6599 #endif
6600 #ifdef TARGET_NR_socketpair
6601 case TARGET_NR_socketpair:
6602 ret = do_socketpair(arg1, arg2, arg3, arg4);
6603 break;
6604 #endif
6605 #ifdef TARGET_NR_setsockopt
6606 case TARGET_NR_setsockopt:
6607 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6608 break;
6609 #endif
6610
6611 case TARGET_NR_syslog:
6612 if (!(p = lock_user_string(arg2)))
6613 goto efault;
6614 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6615 unlock_user(p, arg2, 0);
6616 break;
6617
6618 case TARGET_NR_setitimer:
6619 {
6620 struct itimerval value, ovalue, *pvalue;
6621
6622 if (arg2) {
6623 pvalue = &value;
6624 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6625 || copy_from_user_timeval(&pvalue->it_value,
6626 arg2 + sizeof(struct target_timeval)))
6627 goto efault;
6628 } else {
6629 pvalue = NULL;
6630 }
6631 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6632 if (!is_error(ret) && arg3) {
6633 if (copy_to_user_timeval(arg3,
6634 &ovalue.it_interval)
6635 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6636 &ovalue.it_value))
6637 goto efault;
6638 }
6639 }
6640 break;
6641 case TARGET_NR_getitimer:
6642 {
6643 struct itimerval value;
6644
6645 ret = get_errno(getitimer(arg1, &value));
6646 if (!is_error(ret) && arg2) {
6647 if (copy_to_user_timeval(arg2,
6648 &value.it_interval)
6649 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6650 &value.it_value))
6651 goto efault;
6652 }
6653 }
6654 break;
6655 case TARGET_NR_stat:
6656 if (!(p = lock_user_string(arg1)))
6657 goto efault;
6658 ret = get_errno(stat(path(p), &st));
6659 unlock_user(p, arg1, 0);
6660 goto do_stat;
6661 case TARGET_NR_lstat:
6662 if (!(p = lock_user_string(arg1)))
6663 goto efault;
6664 ret = get_errno(lstat(path(p), &st));
6665 unlock_user(p, arg1, 0);
6666 goto do_stat;
6667 case TARGET_NR_fstat:
6668 {
6669 ret = get_errno(fstat(arg1, &st));
6670 do_stat:
6671 if (!is_error(ret)) {
6672 struct target_stat *target_st;
6673
6674 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6675 goto efault;
6676 memset(target_st, 0, sizeof(*target_st));
6677 __put_user(st.st_dev, &target_st->st_dev);
6678 __put_user(st.st_ino, &target_st->st_ino);
6679 __put_user(st.st_mode, &target_st->st_mode);
6680 __put_user(st.st_uid, &target_st->st_uid);
6681 __put_user(st.st_gid, &target_st->st_gid);
6682 __put_user(st.st_nlink, &target_st->st_nlink);
6683 __put_user(st.st_rdev, &target_st->st_rdev);
6684 __put_user(st.st_size, &target_st->st_size);
6685 __put_user(st.st_blksize, &target_st->st_blksize);
6686 __put_user(st.st_blocks, &target_st->st_blocks);
6687 __put_user(st.st_atime, &target_st->target_st_atime);
6688 __put_user(st.st_mtime, &target_st->target_st_mtime);
6689 __put_user(st.st_ctime, &target_st->target_st_ctime);
6690 unlock_user_struct(target_st, arg2, 1);
6691 }
6692 }
6693 break;
6694 #ifdef TARGET_NR_olduname
6695 case TARGET_NR_olduname:
6696 goto unimplemented;
6697 #endif
6698 #ifdef TARGET_NR_iopl
6699 case TARGET_NR_iopl:
6700 goto unimplemented;
6701 #endif
6702 case TARGET_NR_vhangup:
6703 ret = get_errno(vhangup());
6704 break;
6705 #ifdef TARGET_NR_idle
6706 case TARGET_NR_idle:
6707 goto unimplemented;
6708 #endif
6709 #ifdef TARGET_NR_syscall
6710 case TARGET_NR_syscall:
6711 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6712 arg6, arg7, arg8, 0);
6713 break;
6714 #endif
6715 case TARGET_NR_wait4:
6716 {
6717 int status;
6718 abi_long status_ptr = arg2;
6719 struct rusage rusage, *rusage_ptr;
6720 abi_ulong target_rusage = arg4;
6721 if (target_rusage)
6722 rusage_ptr = &rusage;
6723 else
6724 rusage_ptr = NULL;
6725 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6726 if (!is_error(ret)) {
6727 if (status_ptr && ret) {
6728 status = host_to_target_waitstatus(status);
6729 if (put_user_s32(status, status_ptr))
6730 goto efault;
6731 }
6732 if (target_rusage)
6733 host_to_target_rusage(target_rusage, &rusage);
6734 }
6735 }
6736 break;
6737 #ifdef TARGET_NR_swapoff
6738 case TARGET_NR_swapoff:
6739 if (!(p = lock_user_string(arg1)))
6740 goto efault;
6741 ret = get_errno(swapoff(p));
6742 unlock_user(p, arg1, 0);
6743 break;
6744 #endif
6745 case TARGET_NR_sysinfo:
6746 {
6747 struct target_sysinfo *target_value;
6748 struct sysinfo value;
6749 ret = get_errno(sysinfo(&value));
6750 if (!is_error(ret) && arg1)
6751 {
6752 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6753 goto efault;
6754 __put_user(value.uptime, &target_value->uptime);
6755 __put_user(value.loads[0], &target_value->loads[0]);
6756 __put_user(value.loads[1], &target_value->loads[1]);
6757 __put_user(value.loads[2], &target_value->loads[2]);
6758 __put_user(value.totalram, &target_value->totalram);
6759 __put_user(value.freeram, &target_value->freeram);
6760 __put_user(value.sharedram, &target_value->sharedram);
6761 __put_user(value.bufferram, &target_value->bufferram);
6762 __put_user(value.totalswap, &target_value->totalswap);
6763 __put_user(value.freeswap, &target_value->freeswap);
6764 __put_user(value.procs, &target_value->procs);
6765 __put_user(value.totalhigh, &target_value->totalhigh);
6766 __put_user(value.freehigh, &target_value->freehigh);
6767 __put_user(value.mem_unit, &target_value->mem_unit);
6768 unlock_user_struct(target_value, arg1, 1);
6769 }
6770 }
6771 break;
6772 #ifdef TARGET_NR_ipc
6773 case TARGET_NR_ipc:
6774 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6775 break;
6776 #endif
6777 #ifdef TARGET_NR_semget
6778 case TARGET_NR_semget:
6779 ret = get_errno(semget(arg1, arg2, arg3));
6780 break;
6781 #endif
6782 #ifdef TARGET_NR_semop
6783 case TARGET_NR_semop:
6784 ret = get_errno(do_semop(arg1, arg2, arg3));
6785 break;
6786 #endif
6787 #ifdef TARGET_NR_semctl
6788 case TARGET_NR_semctl:
6789 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6790 break;
6791 #endif
6792 #ifdef TARGET_NR_msgctl
6793 case TARGET_NR_msgctl:
6794 ret = do_msgctl(arg1, arg2, arg3);
6795 break;
6796 #endif
6797 #ifdef TARGET_NR_msgget
6798 case TARGET_NR_msgget:
6799 ret = get_errno(msgget(arg1, arg2));
6800 break;
6801 #endif
6802 #ifdef TARGET_NR_msgrcv
6803 case TARGET_NR_msgrcv:
6804 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6805 break;
6806 #endif
6807 #ifdef TARGET_NR_msgsnd
6808 case TARGET_NR_msgsnd:
6809 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6810 break;
6811 #endif
6812 #ifdef TARGET_NR_shmget
6813 case TARGET_NR_shmget:
6814 ret = get_errno(shmget(arg1, arg2, arg3));
6815 break;
6816 #endif
6817 #ifdef TARGET_NR_shmctl
6818 case TARGET_NR_shmctl:
6819 ret = do_shmctl(arg1, arg2, arg3);
6820 break;
6821 #endif
6822 #ifdef TARGET_NR_shmat
6823 case TARGET_NR_shmat:
6824 ret = do_shmat(arg1, arg2, arg3);
6825 break;
6826 #endif
6827 #ifdef TARGET_NR_shmdt
6828 case TARGET_NR_shmdt:
6829 ret = do_shmdt(arg1);
6830 break;
6831 #endif
6832 case TARGET_NR_fsync:
6833 ret = get_errno(fsync(arg1));
6834 break;
6835 case TARGET_NR_clone:
6836 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6837 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6838 #elif defined(TARGET_CRIS)
6839 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6840 #elif defined(TARGET_S390X)
6841 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6842 #else
6843 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6844 #endif
6845 break;
6846 #ifdef __NR_exit_group
6847 /* new thread calls */
6848 case TARGET_NR_exit_group:
6849 #ifdef TARGET_GPROF
6850 _mcleanup();
6851 #endif
6852 gdb_exit(cpu_env, arg1);
6853 ret = get_errno(exit_group(arg1));
6854 break;
6855 #endif
6856 case TARGET_NR_setdomainname:
6857 if (!(p = lock_user_string(arg1)))
6858 goto efault;
6859 ret = get_errno(setdomainname(p, arg2));
6860 unlock_user(p, arg1, 0);
6861 break;
6862 case TARGET_NR_uname:
6863 /* no need to transcode because we use the linux syscall */
6864 {
6865 struct new_utsname * buf;
6866
6867 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6868 goto efault;
6869 ret = get_errno(sys_uname(buf));
6870 if (!is_error(ret)) {
6871 /* Overwrite the native machine name with whatever is being
6872 emulated. */
6873 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6874 /* Allow the user to override the reported release. */
6875 if (qemu_uname_release && *qemu_uname_release)
6876 strcpy (buf->release, qemu_uname_release);
6877 }
6878 unlock_user_struct(buf, arg1, 1);
6879 }
6880 break;
6881 #ifdef TARGET_I386
6882 case TARGET_NR_modify_ldt:
6883 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6884 break;
6885 #if !defined(TARGET_X86_64)
6886 case TARGET_NR_vm86old:
6887 goto unimplemented;
6888 case TARGET_NR_vm86:
6889 ret = do_vm86(cpu_env, arg1, arg2);
6890 break;
6891 #endif
6892 #endif
6893 case TARGET_NR_adjtimex:
6894 goto unimplemented;
6895 #ifdef TARGET_NR_create_module
6896 case TARGET_NR_create_module:
6897 #endif
6898 case TARGET_NR_init_module:
6899 case TARGET_NR_delete_module:
6900 #ifdef TARGET_NR_get_kernel_syms
6901 case TARGET_NR_get_kernel_syms:
6902 #endif
6903 goto unimplemented;
6904 case TARGET_NR_quotactl:
6905 goto unimplemented;
6906 case TARGET_NR_getpgid:
6907 ret = get_errno(getpgid(arg1));
6908 break;
6909 case TARGET_NR_fchdir:
6910 ret = get_errno(fchdir(arg1));
6911 break;
6912 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6913 case TARGET_NR_bdflush:
6914 goto unimplemented;
6915 #endif
6916 #ifdef TARGET_NR_sysfs
6917 case TARGET_NR_sysfs:
6918 goto unimplemented;
6919 #endif
6920 case TARGET_NR_personality:
6921 ret = get_errno(personality(arg1));
6922 break;
6923 #ifdef TARGET_NR_afs_syscall
6924 case TARGET_NR_afs_syscall:
6925 goto unimplemented;
6926 #endif
6927 #ifdef TARGET_NR__llseek /* Not on alpha */
6928 case TARGET_NR__llseek:
6929 {
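/* Hosts without an llseek syscall do a plain lseek on the combined 64-bit
   offset; either way the 64-bit result is written back through arg4. */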
6930 int64_t res;
6931 #if !defined(__NR_llseek)
6932 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6933 if (res == -1) {
6934 ret = get_errno(res);
6935 } else {
6936 ret = 0;
6937 }
6938 #else
6939 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6940 #endif
6941 if ((ret == 0) && put_user_s64(res, arg4)) {
6942 goto efault;
6943 }
6944 }
6945 break;
6946 #endif
6947 case TARGET_NR_getdents:
6948 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6949 {
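/* 32-bit target on a 64-bit host: the struct linux_dirent layouts differ
   (long-sized d_ino/d_off), so read the entries into a host buffer and
   repack each one into the target layout. */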
6950 struct target_dirent *target_dirp;
6951 struct linux_dirent *dirp;
6952 abi_long count = arg3;
6953
6954 dirp = malloc(count);
6955 if (!dirp) {
6956 ret = -TARGET_ENOMEM;
6957 goto fail;
6958 }
6959
6960 ret = get_errno(sys_getdents(arg1, dirp, count));
6961 if (!is_error(ret)) {
6962 struct linux_dirent *de;
6963 struct target_dirent *tde;
6964 int len = ret;
6965 int reclen, treclen;
6966 int count1, tnamelen;
6967
6968 count1 = 0;
6969 de = dirp;
6970 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6971 goto efault;
6972 tde = target_dirp;
6973 while (len > 0) {
6974 reclen = de->d_reclen;
6975 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6976 tde->d_reclen = tswap16(treclen);
6977 tde->d_ino = tswapal(de->d_ino);
6978 tde->d_off = tswapal(de->d_off);
6979 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6980 if (tnamelen > 256)
6981 tnamelen = 256;
6982 /* XXX: may not be correct */
6983 pstrcpy(tde->d_name, tnamelen, de->d_name);
6984 de = (struct linux_dirent *)((char *)de + reclen);
6985 len -= reclen;
6986 tde = (struct target_dirent *)((char *)tde + treclen);
6987 count1 += treclen;
6988 }
6989 ret = count1;
6990 unlock_user(target_dirp, arg2, ret);
6991 }
6992 free(dirp);
6993 }
6994 #else
6995 {
6996 struct linux_dirent *dirp;
6997 abi_long count = arg3;
6998
6999 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7000 goto efault;
7001 ret = get_errno(sys_getdents(arg1, dirp, count));
7002 if (!is_error(ret)) {
7003 struct linux_dirent *de;
7004 int len = ret;
7005 int reclen;
7006 de = dirp;
7007 while (len > 0) {
7008 reclen = de->d_reclen;
7009 if (reclen > len)
7010 break;
7011 de->d_reclen = tswap16(reclen);
7012 tswapls(&de->d_ino);
7013 tswapls(&de->d_off);
7014 de = (struct linux_dirent *)((char *)de + reclen);
7015 len -= reclen;
7016 }
7017 }
7018 unlock_user(dirp, arg2, ret);
7019 }
7020 #endif
7021 break;
7022 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7023 case TARGET_NR_getdents64:
7024 {
7025 struct linux_dirent64 *dirp;
7026 abi_long count = arg3;
7027 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7028 goto efault;
7029 ret = get_errno(sys_getdents64(arg1, dirp, count));
7030 if (!is_error(ret)) {
7031 struct linux_dirent64 *de;
7032 int len = ret;
7033 int reclen;
7034 de = dirp;
7035 while (len > 0) {
7036 reclen = de->d_reclen;
7037 if (reclen > len)
7038 break;
7039 de->d_reclen = tswap16(reclen);
7040 tswap64s((uint64_t *)&de->d_ino);
7041 tswap64s((uint64_t *)&de->d_off);
7042 de = (struct linux_dirent64 *)((char *)de + reclen);
7043 len -= reclen;
7044 }
7045 }
7046 unlock_user(dirp, arg2, ret);
7047 }
7048 break;
7049 #endif /* TARGET_NR_getdents64 */
7050 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
7051 #ifdef TARGET_S390X
7052 case TARGET_NR_select:
7053 #else
7054 case TARGET_NR__newselect:
7055 #endif
7056 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7057 break;
7058 #endif
7059 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7060 # ifdef TARGET_NR_poll
7061 case TARGET_NR_poll:
7062 # endif
7063 # ifdef TARGET_NR_ppoll
7064 case TARGET_NR_ppoll:
7065 # endif
7066 {
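/* Convert the guest pollfd array to host order; for ppoll additionally
   convert the optional timespec and signal mask, then copy revents (and,
   for ppoll, the updated timespec) back to the guest. */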
7067 struct target_pollfd *target_pfd;
7068 unsigned int nfds = arg2;
7069 int timeout = arg3;
7070 struct pollfd *pfd;
7071 unsigned int i;
7072
7073 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7074 if (!target_pfd)
7075 goto efault;
7076
7077 pfd = alloca(sizeof(struct pollfd) * nfds);
7078 for(i = 0; i < nfds; i++) {
7079 pfd[i].fd = tswap32(target_pfd[i].fd);
7080 pfd[i].events = tswap16(target_pfd[i].events);
7081 }
7082
7083 # ifdef TARGET_NR_ppoll
7084 if (num == TARGET_NR_ppoll) {
7085 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7086 target_sigset_t *target_set;
7087 sigset_t _set, *set = &_set;
7088
7089 if (arg3) {
7090 if (target_to_host_timespec(timeout_ts, arg3)) {
7091 unlock_user(target_pfd, arg1, 0);
7092 goto efault;
7093 }
7094 } else {
7095 timeout_ts = NULL;
7096 }
7097
7098 if (arg4) {
7099 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7100 if (!target_set) {
7101 unlock_user(target_pfd, arg1, 0);
7102 goto efault;
7103 }
7104 target_to_host_sigset(set, target_set);
7105 } else {
7106 set = NULL;
7107 }
7108
7109 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7110
7111 if (!is_error(ret) && arg3) {
7112 host_to_target_timespec(arg3, timeout_ts);
7113 }
7114 if (arg4) {
7115 unlock_user(target_set, arg4, 0);
7116 }
7117 } else
7118 # endif
7119 ret = get_errno(poll(pfd, nfds, timeout));
7120
7121 if (!is_error(ret)) {
7122 for(i = 0; i < nfds; i++) {
7123 target_pfd[i].revents = tswap16(pfd[i].revents);
7124 }
7125 }
7126 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7127 }
7128 break;
7129 #endif
7130 case TARGET_NR_flock:
7131 /* NOTE: the flock constant seems to be the same for every
7132 Linux platform */
7133 ret = get_errno(flock(arg1, arg2));
7134 break;
7135 case TARGET_NR_readv:
7136 {
7137 int count = arg3;
7138 struct iovec *vec;
7139
7140 vec = alloca(count * sizeof(struct iovec));
7141 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
7142 goto efault;
7143 ret = get_errno(readv(arg1, vec, count));
7144 unlock_iovec(vec, arg2, count, 1);
7145 }
7146 break;
7147 case TARGET_NR_writev:
7148 {
7149 int count = arg3;
7150 struct iovec *vec;
7151
7152 vec = alloca(count * sizeof(struct iovec));
7153 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7154 goto efault;
7155 ret = get_errno(writev(arg1, vec, count));
7156 unlock_iovec(vec, arg2, count, 0);
7157 }
7158 break;
7159 case TARGET_NR_getsid:
7160 ret = get_errno(getsid(arg1));
7161 break;
7162 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7163 case TARGET_NR_fdatasync:
7164 ret = get_errno(fdatasync(arg1));
7165 break;
7166 #endif
7167 case TARGET_NR__sysctl:
7168 /* We don't implement this, but ENOTDIR is always a safe
7169 return value. */
7170 ret = -TARGET_ENOTDIR;
7171 break;
7172 case TARGET_NR_sched_getaffinity:
7173 {
7174 unsigned int mask_size;
7175 unsigned long *mask;
7176
7177 /*
7178 * sched_getaffinity needs multiples of ulong, so need to take
7179 * care of mismatches between target ulong and host ulong sizes.
7180 */
7181 if (arg2 & (sizeof(abi_ulong) - 1)) {
7182 ret = -TARGET_EINVAL;
7183 break;
7184 }
7185 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7186
7187 mask = alloca(mask_size);
7188 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7189
7190 if (!is_error(ret)) {
7191 if (copy_to_user(arg3, mask, ret)) {
7192 goto efault;
7193 }
7194 }
7195 }
7196 break;
7197 case TARGET_NR_sched_setaffinity:
7198 {
7199 unsigned int mask_size;
7200 unsigned long *mask;
7201
7202 /*
7203 * sched_setaffinity needs multiples of ulong, so need to take
7204 * care of mismatches between target ulong and host ulong sizes.
7205 */
7206 if (arg2 & (sizeof(abi_ulong) - 1)) {
7207 ret = -TARGET_EINVAL;
7208 break;
7209 }
7210 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7211
7212 mask = alloca(mask_size);
7213 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7214 goto efault;
7215 }
7216 memcpy(mask, p, arg2);
7217 unlock_user_struct(p, arg3, 0);
7218
7219 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7220 }
7221 break;
7222 case TARGET_NR_sched_setparam:
7223 {
7224 struct sched_param *target_schp;
7225 struct sched_param schp;
7226
7227 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7228 goto efault;
7229 schp.sched_priority = tswap32(target_schp->sched_priority);
7230 unlock_user_struct(target_schp, arg2, 0);
7231 ret = get_errno(sched_setparam(arg1, &schp));
7232 }
7233 break;
7234 case TARGET_NR_sched_getparam:
7235 {
7236 struct sched_param *target_schp;
7237 struct sched_param schp;
7238 ret = get_errno(sched_getparam(arg1, &schp));
7239 if (!is_error(ret)) {
7240 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7241 goto efault;
7242 target_schp->sched_priority = tswap32(schp.sched_priority);
7243 unlock_user_struct(target_schp, arg2, 1);
7244 }
7245 }
7246 break;
7247 case TARGET_NR_sched_setscheduler:
7248 {
7249 struct sched_param *target_schp;
7250 struct sched_param schp;
7251 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7252 goto efault;
7253 schp.sched_priority = tswap32(target_schp->sched_priority);
7254 unlock_user_struct(target_schp, arg3, 0);
7255 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7256 }
7257 break;
7258 case TARGET_NR_sched_getscheduler:
7259 ret = get_errno(sched_getscheduler(arg1));
7260 break;
7261 case TARGET_NR_sched_yield:
7262 ret = get_errno(sched_yield());
7263 break;
7264 case TARGET_NR_sched_get_priority_max:
7265 ret = get_errno(sched_get_priority_max(arg1));
7266 break;
7267 case TARGET_NR_sched_get_priority_min:
7268 ret = get_errno(sched_get_priority_min(arg1));
7269 break;
7270 case TARGET_NR_sched_rr_get_interval:
7271 {
7272 struct timespec ts;
7273 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7274 if (!is_error(ret)) {
7275 host_to_target_timespec(arg2, &ts);
7276 }
7277 }
7278 break;
7279 case TARGET_NR_nanosleep:
7280 {
7281 struct timespec req, rem;
7282 target_to_host_timespec(&req, arg1);
7283 ret = get_errno(nanosleep(&req, &rem));
7284 if (is_error(ret) && arg2) {
7285 host_to_target_timespec(arg2, &rem);
7286 }
7287 }
7288 break;
7289 #ifdef TARGET_NR_query_module
7290 case TARGET_NR_query_module:
7291 goto unimplemented;
7292 #endif
7293 #ifdef TARGET_NR_nfsservctl
7294 case TARGET_NR_nfsservctl:
7295 goto unimplemented;
7296 #endif
7297 case TARGET_NR_prctl:
7298 switch (arg1) {
7299 case PR_GET_PDEATHSIG:
7300 {
7301 int deathsig;
7302 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7303 if (!is_error(ret) && arg2
7304 && put_user_ual(deathsig, arg2)) {
7305 goto efault;
7306 }
7307 break;
7308 }
7309 #ifdef PR_GET_NAME
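/* PR_GET_NAME / PR_SET_NAME operate on the kernel's 16-byte task comm
   buffer, so only 16 bytes of guest memory need to be locked. */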
7310 case PR_GET_NAME:
7311 {
7312 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7313 if (!name) {
7314 goto efault;
7315 }
7316 ret = get_errno(prctl(arg1, (unsigned long)name,
7317 arg3, arg4, arg5));
7318 unlock_user(name, arg2, 16);
7319 break;
7320 }
7321 case PR_SET_NAME:
7322 {
7323 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7324 if (!name) {
7325 goto efault;
7326 }
7327 ret = get_errno(prctl(arg1, (unsigned long)name,
7328 arg3, arg4, arg5));
7329 unlock_user(name, arg2, 0);
7330 break;
7331 }
7332 #endif
7333 default:
7334 /* Most prctl options have no pointer arguments */
7335 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7336 break;
7337 }
7338 break;
7339 #ifdef TARGET_NR_arch_prctl
7340 case TARGET_NR_arch_prctl:
7341 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7342 ret = do_arch_prctl(cpu_env, arg1, arg2);
7343 break;
7344 #else
7345 goto unimplemented;
7346 #endif
7347 #endif
7348 #ifdef TARGET_NR_pread
7349 case TARGET_NR_pread:
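/* Targets whose ABI aligns 64-bit syscall arguments to even register pairs
   insert a pad word, so the file offset arrives in arg5 instead of arg4. */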
7350 if (regpairs_aligned(cpu_env))
7351 arg4 = arg5;
7352 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7353 goto efault;
7354 ret = get_errno(pread(arg1, p, arg3, arg4));
7355 unlock_user(p, arg2, ret);
7356 break;
7357 case TARGET_NR_pwrite:
7358 if (regpairs_aligned(cpu_env))
7359 arg4 = arg5;
7360 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7361 goto efault;
7362 ret = get_errno(pwrite(arg1, p, arg3, arg4));
7363 unlock_user(p, arg2, 0);
7364 break;
7365 #endif
7366 #ifdef TARGET_NR_pread64
7367 case TARGET_NR_pread64:
7368 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7369 goto efault;
7370 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7371 unlock_user(p, arg2, ret);
7372 break;
7373 case TARGET_NR_pwrite64:
7374 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7375 goto efault;
7376 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7377 unlock_user(p, arg2, 0);
7378 break;
7379 #endif
7380 case TARGET_NR_getcwd:
7381 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7382 goto efault;
7383 ret = get_errno(sys_getcwd1(p, arg2));
7384 unlock_user(p, arg1, ret);
7385 break;
7386 case TARGET_NR_capget:
7387 goto unimplemented;
7388 case TARGET_NR_capset:
7389 goto unimplemented;
7390 case TARGET_NR_sigaltstack:
7391 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7392 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7393 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7394 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7395 break;
7396 #else
7397 goto unimplemented;
7398 #endif
7399 case TARGET_NR_sendfile:
7400 goto unimplemented;
7401 #ifdef TARGET_NR_getpmsg
7402 case TARGET_NR_getpmsg:
7403 goto unimplemented;
7404 #endif
7405 #ifdef TARGET_NR_putpmsg
7406 case TARGET_NR_putpmsg:
7407 goto unimplemented;
7408 #endif
7409 #ifdef TARGET_NR_vfork
7410 case TARGET_NR_vfork:
7411 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7412 0, 0, 0, 0));
7413 break;
7414 #endif
7415 #ifdef TARGET_NR_ugetrlimit
7416 case TARGET_NR_ugetrlimit:
7417 {
7418 struct rlimit rlim;
7419 int resource = target_to_host_resource(arg1);
7420 ret = get_errno(getrlimit(resource, &rlim));
7421 if (!is_error(ret)) {
7422 struct target_rlimit *target_rlim;
7423 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7424 goto efault;
7425 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7426 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7427 unlock_user_struct(target_rlim, arg2, 1);
7428 }
7429 break;
7430 }
7431 #endif
7432 #ifdef TARGET_NR_truncate64
7433 case TARGET_NR_truncate64:
7434 if (!(p = lock_user_string(arg1)))
7435 goto efault;
7436 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7437 unlock_user(p, arg1, 0);
7438 break;
7439 #endif
7440 #ifdef TARGET_NR_ftruncate64
7441 case TARGET_NR_ftruncate64:
7442 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7443 break;
7444 #endif
7445 #ifdef TARGET_NR_stat64
7446 case TARGET_NR_stat64:
7447 if (!(p = lock_user_string(arg1)))
7448 goto efault;
7449 ret = get_errno(stat(path(p), &st));
7450 unlock_user(p, arg1, 0);
7451 if (!is_error(ret))
7452 ret = host_to_target_stat64(cpu_env, arg2, &st);
7453 break;
7454 #endif
7455 #ifdef TARGET_NR_lstat64
7456 case TARGET_NR_lstat64:
7457 if (!(p = lock_user_string(arg1)))
7458 goto efault;
7459 ret = get_errno(lstat(path(p), &st));
7460 unlock_user(p, arg1, 0);
7461 if (!is_error(ret))
7462 ret = host_to_target_stat64(cpu_env, arg2, &st);
7463 break;
7464 #endif
7465 #ifdef TARGET_NR_fstat64
7466 case TARGET_NR_fstat64:
7467 ret = get_errno(fstat(arg1, &st));
7468 if (!is_error(ret))
7469 ret = host_to_target_stat64(cpu_env, arg2, &st);
7470 break;
7471 #endif
7472 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7473 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7474 #ifdef TARGET_NR_fstatat64
7475 case TARGET_NR_fstatat64:
7476 #endif
7477 #ifdef TARGET_NR_newfstatat
7478 case TARGET_NR_newfstatat:
7479 #endif
7480 if (!(p = lock_user_string(arg2)))
7481 goto efault;
7482 #ifdef __NR_fstatat64
7483 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7484 #else
7485 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7486 #endif
7487 if (!is_error(ret))
7488 ret = host_to_target_stat64(cpu_env, arg3, &st);
7489 break;
7490 #endif
7491 case TARGET_NR_lchown:
7492 if (!(p = lock_user_string(arg1)))
7493 goto efault;
7494 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7495 unlock_user(p, arg1, 0);
7496 break;
7497 #ifdef TARGET_NR_getuid
7498 case TARGET_NR_getuid:
7499 ret = get_errno(high2lowuid(getuid()));
7500 break;
7501 #endif
7502 #ifdef TARGET_NR_getgid
7503 case TARGET_NR_getgid:
7504 ret = get_errno(high2lowgid(getgid()));
7505 break;
7506 #endif
7507 #ifdef TARGET_NR_geteuid
7508 case TARGET_NR_geteuid:
7509 ret = get_errno(high2lowuid(geteuid()));
7510 break;
7511 #endif
7512 #ifdef TARGET_NR_getegid
7513 case TARGET_NR_getegid:
7514 ret = get_errno(high2lowgid(getegid()));
7515 break;
7516 #endif
7517 case TARGET_NR_setreuid:
7518 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7519 break;
7520 case TARGET_NR_setregid:
7521 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7522 break;
7523 case TARGET_NR_getgroups:
7524 {
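/* Legacy 16-bit group IDs: the buffer size assumes two-byte target_id
   entries (gidsetsize * 2) and host gids are narrowed with high2lowgid(). */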
7525 int gidsetsize = arg1;
7526 target_id *target_grouplist;
7527 gid_t *grouplist;
7528 int i;
7529
7530 grouplist = alloca(gidsetsize * sizeof(gid_t));
7531 ret = get_errno(getgroups(gidsetsize, grouplist));
7532 if (gidsetsize == 0)
7533 break;
7534 if (!is_error(ret)) {
7535 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7536 if (!target_grouplist)
7537 goto efault;
7538 for(i = 0;i < ret; i++)
7539 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7540 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7541 }
7542 }
7543 break;
7544 case TARGET_NR_setgroups:
7545 {
7546 int gidsetsize = arg1;
7547 target_id *target_grouplist;
7548 gid_t *grouplist;
7549 int i;
7550
7551 grouplist = alloca(gidsetsize * sizeof(gid_t));
7552 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7553 if (!target_grouplist) {
7554 ret = -TARGET_EFAULT;
7555 goto fail;
7556 }
7557 for(i = 0;i < gidsetsize; i++)
7558 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7559 unlock_user(target_grouplist, arg2, 0);
7560 ret = get_errno(setgroups(gidsetsize, grouplist));
7561 }
7562 break;
7563 case TARGET_NR_fchown:
7564 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7565 break;
7566 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7567 case TARGET_NR_fchownat:
7568 if (!(p = lock_user_string(arg2)))
7569 goto efault;
7570 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7571 unlock_user(p, arg2, 0);
7572 break;
7573 #endif
7574 #ifdef TARGET_NR_setresuid
7575 case TARGET_NR_setresuid:
7576 ret = get_errno(setresuid(low2highuid(arg1),
7577 low2highuid(arg2),
7578 low2highuid(arg3)));
7579 break;
7580 #endif
7581 #ifdef TARGET_NR_getresuid
7582 case TARGET_NR_getresuid:
7583 {
7584 uid_t ruid, euid, suid;
7585 ret = get_errno(getresuid(&ruid, &euid, &suid));
7586 if (!is_error(ret)) {
7587 if (put_user_u16(high2lowuid(ruid), arg1)
7588 || put_user_u16(high2lowuid(euid), arg2)
7589 || put_user_u16(high2lowuid(suid), arg3))
7590 goto efault;
7591 }
7592 }
7593 break;
7594 #endif
7595 #ifdef TARGET_NR_getresgid
7596 case TARGET_NR_setresgid:
7597 ret = get_errno(setresgid(low2highgid(arg1),
7598 low2highgid(arg2),
7599 low2highgid(arg3)));
7600 break;
7601 #endif
7602 #ifdef TARGET_NR_getresgid
7603 case TARGET_NR_getresgid:
7604 {
7605 gid_t rgid, egid, sgid;
7606 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7607 if (!is_error(ret)) {
7608 if (put_user_u16(high2lowgid(rgid), arg1)
7609 || put_user_u16(high2lowgid(egid), arg2)
7610 || put_user_u16(high2lowgid(sgid), arg3))
7611 goto efault;
7612 }
7613 }
7614 break;
7615 #endif
7616 case TARGET_NR_chown:
7617 if (!(p = lock_user_string(arg1)))
7618 goto efault;
7619 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7620 unlock_user(p, arg1, 0);
7621 break;
7622 case TARGET_NR_setuid:
7623 ret = get_errno(setuid(low2highuid(arg1)));
7624 break;
7625 case TARGET_NR_setgid:
7626 ret = get_errno(setgid(low2highgid(arg1)));
7627 break;
7628 case TARGET_NR_setfsuid:
7629 ret = get_errno(setfsuid(arg1));
7630 break;
7631 case TARGET_NR_setfsgid:
7632 ret = get_errno(setfsgid(arg1));
7633 break;
7634
7635 #ifdef TARGET_NR_lchown32
7636 case TARGET_NR_lchown32:
7637 if (!(p = lock_user_string(arg1)))
7638 goto efault;
7639 ret = get_errno(lchown(p, arg2, arg3));
7640 unlock_user(p, arg1, 0);
7641 break;
7642 #endif
7643 #ifdef TARGET_NR_getuid32
7644 case TARGET_NR_getuid32:
7645 ret = get_errno(getuid());
7646 break;
7647 #endif
7648
7649 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7650 /* Alpha specific */
7651 case TARGET_NR_getxuid:
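/* OSF getxuid returns the real uid as the syscall result and the
   effective uid in register a4. */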
7652 {
7653 uid_t euid;
7654 euid=geteuid();
7655 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7656 }
7657 ret = get_errno(getuid());
7658 break;
7659 #endif
7660 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7661 /* Alpha specific */
7662 case TARGET_NR_getxgid:
7663 {
7664 gid_t egid;
7665 egid=getegid();
7666 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7667 }
7668 ret = get_errno(getgid());
7669 break;
7670 #endif
7671 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7672 /* Alpha specific */
7673 case TARGET_NR_osf_getsysinfo:
7674 ret = -TARGET_EOPNOTSUPP;
7675 switch (arg1) {
7676 case TARGET_GSI_IEEE_FP_CONTROL:
7677 {
7678 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7679
7680 /* Copied from linux ieee_fpcr_to_swcr. */
7681 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7682 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7683 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7684 | SWCR_TRAP_ENABLE_DZE
7685 | SWCR_TRAP_ENABLE_OVF);
7686 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7687 | SWCR_TRAP_ENABLE_INE);
7688 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7689 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7690
7691 if (put_user_u64 (swcr, arg2))
7692 goto efault;
7693 ret = 0;
7694 }
7695 break;
7696
7697 /* case GSI_IEEE_STATE_AT_SIGNAL:
7698 -- Not implemented in linux kernel.
7699 case GSI_UACPROC:
7700 -- Retrieves current unaligned access state; not much used.
7701 case GSI_PROC_TYPE:
7702 -- Retrieves implver information; surely not used.
7703 case GSI_GET_HWRPB:
7704 -- Grabs a copy of the HWRPB; surely not used.
7705 */
7706 }
7707 break;
7708 #endif
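/* The bit shuffling above repacks the hardware FPCR status and
 * trap-disable bits into the software completion control word (SWCR)
 * layout exposed by GSI_IEEE_FP_CONTROL, mirroring the kernel's
 * ieee_fpcr_to_swcr(); the reverse mapping is done in osf_setsysinfo
 * below.
 */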
7709 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7710 /* Alpha specific */
7711 case TARGET_NR_osf_setsysinfo:
7712 ret = -TARGET_EOPNOTSUPP;
7713 switch (arg1) {
7714 case TARGET_SSI_IEEE_FP_CONTROL:
7715 {
7716 uint64_t swcr, fpcr, orig_fpcr;
7717
7718 if (get_user_u64 (swcr, arg2)) {
7719 goto efault;
7720 }
7721 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7722 fpcr = orig_fpcr & FPCR_DYN_MASK;
7723
7724 /* Copied from linux ieee_swcr_to_fpcr. */
7725 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7726 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7727 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7728 | SWCR_TRAP_ENABLE_DZE
7729 | SWCR_TRAP_ENABLE_OVF)) << 48;
7730 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7731 | SWCR_TRAP_ENABLE_INE)) << 57;
7732 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7733 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7734
7735 cpu_alpha_store_fpcr(cpu_env, fpcr);
7736 ret = 0;
7737 }
7738 break;
7739
7740 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7741 {
7742 uint64_t exc, fpcr, orig_fpcr;
7743 int si_code;
7744
7745 if (get_user_u64(exc, arg2)) {
7746 goto efault;
7747 }
7748
7749 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7750
7751 /* We only add to the exception status here. */
7752 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7753
7754 cpu_alpha_store_fpcr(cpu_env, fpcr);
7755 ret = 0;
7756
7757 /* Old exceptions are not signaled. */
7758 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7759
7760 /* If any exceptions were set by this call
7761 and are unmasked, send a signal. */
7762 si_code = 0;
7763 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7764 si_code = TARGET_FPE_FLTRES;
7765 }
7766 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7767 si_code = TARGET_FPE_FLTUND;
7768 }
7769 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7770 si_code = TARGET_FPE_FLTOVF;
7771 }
7772 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7773 si_code = TARGET_FPE_FLTDIV;
7774 }
7775 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7776 si_code = TARGET_FPE_FLTINV;
7777 }
7778 if (si_code != 0) {
7779 target_siginfo_t info;
7780 info.si_signo = SIGFPE;
7781 info.si_errno = 0;
7782 info.si_code = si_code;
7783 info._sifields._sigfault._addr
7784 = ((CPUArchState *)cpu_env)->pc;
7785 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7786 }
7787 }
7788 break;
7789
7790 /* case SSI_NVPAIRS:
7791 -- Used with SSIN_UACPROC to enable unaligned accesses.
7792 case SSI_IEEE_STATE_AT_SIGNAL:
7793 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7794 -- Not implemented in linux kernel
7795 */
7796 }
7797 break;
7798 #endif
7799 #ifdef TARGET_NR_osf_sigprocmask
7800 /* Alpha specific. */
7801 case TARGET_NR_osf_sigprocmask:
7802 {
7803 abi_ulong mask;
7804 int how;
7805 sigset_t set, oldset;
7806
7807 switch(arg1) {
7808 case TARGET_SIG_BLOCK:
7809 how = SIG_BLOCK;
7810 break;
7811 case TARGET_SIG_UNBLOCK:
7812 how = SIG_UNBLOCK;
7813 break;
7814 case TARGET_SIG_SETMASK:
7815 how = SIG_SETMASK;
7816 break;
7817 default:
7818 ret = -TARGET_EINVAL;
7819 goto fail;
7820 }
7821 mask = arg2;
7822 target_to_host_old_sigset(&set, &mask);
7823 sigprocmask(how, &set, &oldset);
7824 host_to_target_old_sigset(&mask, &oldset);
7825 ret = mask;
7826 }
7827 break;
7828 #endif
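/* osf_sigprocmask differs from the generic sigprocmask in that the
 * new mask is passed by value and the old mask is returned as the
 * syscall result rather than through a pointer, hence the in-place
 * conversion of "mask" above.
 */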
7829
7830 #ifdef TARGET_NR_getgid32
7831 case TARGET_NR_getgid32:
7832 ret = get_errno(getgid());
7833 break;
7834 #endif
7835 #ifdef TARGET_NR_geteuid32
7836 case TARGET_NR_geteuid32:
7837 ret = get_errno(geteuid());
7838 break;
7839 #endif
7840 #ifdef TARGET_NR_getegid32
7841 case TARGET_NR_getegid32:
7842 ret = get_errno(getegid());
7843 break;
7844 #endif
7845 #ifdef TARGET_NR_setreuid32
7846 case TARGET_NR_setreuid32:
7847 ret = get_errno(setreuid(arg1, arg2));
7848 break;
7849 #endif
7850 #ifdef TARGET_NR_setregid32
7851 case TARGET_NR_setregid32:
7852 ret = get_errno(setregid(arg1, arg2));
7853 break;
7854 #endif
7855 #ifdef TARGET_NR_getgroups32
7856 case TARGET_NR_getgroups32:
7857 {
7858 int gidsetsize = arg1;
7859 uint32_t *target_grouplist;
7860 gid_t *grouplist;
7861 int i;
7862
7863 grouplist = alloca(gidsetsize * sizeof(gid_t));
7864 ret = get_errno(getgroups(gidsetsize, grouplist));
7865 if (gidsetsize == 0)
7866 break;
7867 if (!is_error(ret)) {
7868 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7869 if (!target_grouplist) {
7870 ret = -TARGET_EFAULT;
7871 goto fail;
7872 }
7873 for (i = 0; i < ret; i++)
7874 target_grouplist[i] = tswap32(grouplist[i]);
7875 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7876 }
7877 }
7878 break;
7879 #endif
7880 #ifdef TARGET_NR_setgroups32
7881 case TARGET_NR_setgroups32:
7882 {
7883 int gidsetsize = arg1;
7884 uint32_t *target_grouplist;
7885 gid_t *grouplist;
7886 int i;
7887
7888 grouplist = alloca(gidsetsize * sizeof(gid_t));
7889 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7890 if (!target_grouplist) {
7891 ret = -TARGET_EFAULT;
7892 goto fail;
7893 }
7894 for (i = 0; i < gidsetsize; i++)
7895 grouplist[i] = tswap32(target_grouplist[i]);
7896 unlock_user(target_grouplist, arg2, 0);
7897 ret = get_errno(setgroups(gidsetsize, grouplist));
7898 }
7899 break;
7900 #endif
7901 #ifdef TARGET_NR_fchown32
7902 case TARGET_NR_fchown32:
7903 ret = get_errno(fchown(arg1, arg2, arg3));
7904 break;
7905 #endif
7906 #ifdef TARGET_NR_setresuid32
7907 case TARGET_NR_setresuid32:
7908 ret = get_errno(setresuid(arg1, arg2, arg3));
7909 break;
7910 #endif
7911 #ifdef TARGET_NR_getresuid32
7912 case TARGET_NR_getresuid32:
7913 {
7914 uid_t ruid, euid, suid;
7915 ret = get_errno(getresuid(&ruid, &euid, &suid));
7916 if (!is_error(ret)) {
7917 if (put_user_u32(ruid, arg1)
7918 || put_user_u32(euid, arg2)
7919 || put_user_u32(suid, arg3))
7920 goto efault;
7921 }
7922 }
7923 break;
7924 #endif
7925 #ifdef TARGET_NR_setresgid32
7926 case TARGET_NR_setresgid32:
7927 ret = get_errno(setresgid(arg1, arg2, arg3));
7928 break;
7929 #endif
7930 #ifdef TARGET_NR_getresgid32
7931 case TARGET_NR_getresgid32:
7932 {
7933 gid_t rgid, egid, sgid;
7934 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7935 if (!is_error(ret)) {
7936 if (put_user_u32(rgid, arg1)
7937 || put_user_u32(egid, arg2)
7938 || put_user_u32(sgid, arg3))
7939 goto efault;
7940 }
7941 }
7942 break;
7943 #endif
7944 #ifdef TARGET_NR_chown32
7945 case TARGET_NR_chown32:
7946 if (!(p = lock_user_string(arg1)))
7947 goto efault;
7948 ret = get_errno(chown(p, arg2, arg3));
7949 unlock_user(p, arg1, 0);
7950 break;
7951 #endif
7952 #ifdef TARGET_NR_setuid32
7953 case TARGET_NR_setuid32:
7954 ret = get_errno(setuid(arg1));
7955 break;
7956 #endif
7957 #ifdef TARGET_NR_setgid32
7958 case TARGET_NR_setgid32:
7959 ret = get_errno(setgid(arg1));
7960 break;
7961 #endif
7962 #ifdef TARGET_NR_setfsuid32
7963 case TARGET_NR_setfsuid32:
7964 ret = get_errno(setfsuid(arg1));
7965 break;
7966 #endif
7967 #ifdef TARGET_NR_setfsgid32
7968 case TARGET_NR_setfsgid32:
7969 ret = get_errno(setfsgid(arg1));
7970 break;
7971 #endif
7972
7973 case TARGET_NR_pivot_root:
7974 goto unimplemented;
7975 #ifdef TARGET_NR_mincore
7976 case TARGET_NR_mincore:
7977 {
7978 void *a;
7979 ret = -TARGET_EFAULT;
7980 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7981 goto efault;
7982 if (!(p = lock_user_string(arg3)))
7983 goto mincore_fail;
7984 ret = get_errno(mincore(a, arg2, p));
7985 unlock_user(p, arg3, ret);
7986 mincore_fail:
7987 unlock_user(a, arg1, 0);
7988 }
7989 break;
7990 #endif
7991 #ifdef TARGET_NR_arm_fadvise64_64
7992 case TARGET_NR_arm_fadvise64_64:
7993 {
7994 /*
7995 * arm_fadvise64_64 looks like fadvise64_64 but
7996 * with different argument order
7997 */
7998 abi_long temp;
7999 temp = arg3;
8000 arg3 = arg4;
8001 arg4 = temp;
8002 }
8003 #endif
8004 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8005 #ifdef TARGET_NR_fadvise64_64
8006 case TARGET_NR_fadvise64_64:
8007 #endif
8008 #ifdef TARGET_NR_fadvise64
8009 case TARGET_NR_fadvise64:
8010 #endif
8011 #ifdef TARGET_S390X
8012 switch (arg4) {
8013 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8014 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8015 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8016 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8017 default: break;
8018 }
8019 #endif
8020 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8021 break;
8022 #endif
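/* Two ABI quirks are handled above: arm_fadvise64_64 passes its
 * arguments in a different order, so arg3/arg4 are swapped before
 * falling through to the common handling, and the s390x guest uses
 * a different numbering for the POSIX_FADV_DONTNEED/NOREUSE advice
 * values (6/7), which is remapped to the host constants while 4/5
 * are turned into deliberately invalid values.
 */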
8023 #ifdef TARGET_NR_madvise
8024 case TARGET_NR_madvise:
8025 /* A straight passthrough may not be safe because qemu sometimes
8026 turns private file-backed mappings into anonymous mappings.
8027 This will break MADV_DONTNEED.
8028 This is a hint, so ignoring and returning success is ok. */
8029 ret = get_errno(0);
8030 break;
8031 #endif
8032 #if TARGET_ABI_BITS == 32
8033 case TARGET_NR_fcntl64:
8034 {
8035 int cmd;
8036 struct flock64 fl;
8037 struct target_flock64 *target_fl;
8038 #ifdef TARGET_ARM
8039 struct target_eabi_flock64 *target_efl;
8040 #endif
8041
8042 cmd = target_to_host_fcntl_cmd(arg2);
8043 if (cmd == -TARGET_EINVAL) {
8044 ret = cmd;
8045 break;
8046 }
8047
8048 switch(arg2) {
8049 case TARGET_F_GETLK64:
8050 #ifdef TARGET_ARM
8051 if (((CPUARMState *)cpu_env)->eabi) {
8052 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8053 goto efault;
8054 fl.l_type = tswap16(target_efl->l_type);
8055 fl.l_whence = tswap16(target_efl->l_whence);
8056 fl.l_start = tswap64(target_efl->l_start);
8057 fl.l_len = tswap64(target_efl->l_len);
8058 fl.l_pid = tswap32(target_efl->l_pid);
8059 unlock_user_struct(target_efl, arg3, 0);
8060 } else
8061 #endif
8062 {
8063 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8064 goto efault;
8065 fl.l_type = tswap16(target_fl->l_type);
8066 fl.l_whence = tswap16(target_fl->l_whence);
8067 fl.l_start = tswap64(target_fl->l_start);
8068 fl.l_len = tswap64(target_fl->l_len);
8069 fl.l_pid = tswap32(target_fl->l_pid);
8070 unlock_user_struct(target_fl, arg3, 0);
8071 }
8072 ret = get_errno(fcntl(arg1, cmd, &fl));
8073 if (ret == 0) {
8074 #ifdef TARGET_ARM
8075 if (((CPUARMState *)cpu_env)->eabi) {
8076 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8077 goto efault;
8078 target_efl->l_type = tswap16(fl.l_type);
8079 target_efl->l_whence = tswap16(fl.l_whence);
8080 target_efl->l_start = tswap64(fl.l_start);
8081 target_efl->l_len = tswap64(fl.l_len);
8082 target_efl->l_pid = tswap32(fl.l_pid);
8083 unlock_user_struct(target_efl, arg3, 1);
8084 } else
8085 #endif
8086 {
8087 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8088 goto efault;
8089 target_fl->l_type = tswap16(fl.l_type);
8090 target_fl->l_whence = tswap16(fl.l_whence);
8091 target_fl->l_start = tswap64(fl.l_start);
8092 target_fl->l_len = tswap64(fl.l_len);
8093 target_fl->l_pid = tswap32(fl.l_pid);
8094 unlock_user_struct(target_fl, arg3, 1);
8095 }
8096 }
8097 break;
8098
8099 case TARGET_F_SETLK64:
8100 case TARGET_F_SETLKW64:
8101 #ifdef TARGET_ARM
8102 if (((CPUARMState *)cpu_env)->eabi) {
8103 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8104 goto efault;
8105 fl.l_type = tswap16(target_efl->l_type);
8106 fl.l_whence = tswap16(target_efl->l_whence);
8107 fl.l_start = tswap64(target_efl->l_start);
8108 fl.l_len = tswap64(target_efl->l_len);
8109 fl.l_pid = tswap32(target_efl->l_pid);
8110 unlock_user_struct(target_efl, arg3, 0);
8111 } else
8112 #endif
8113 {
8114 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8115 goto efault;
8116 fl.l_type = tswap16(target_fl->l_type);
8117 fl.l_whence = tswap16(target_fl->l_whence);
8118 fl.l_start = tswap64(target_fl->l_start);
8119 fl.l_len = tswap64(target_fl->l_len);
8120 fl.l_pid = tswap32(target_fl->l_pid);
8121 unlock_user_struct(target_fl, arg3, 0);
8122 }
8123 ret = get_errno(fcntl(arg1, cmd, &fl));
8124 break;
8125 default:
8126 ret = do_fcntl(arg1, arg2, arg3);
8127 break;
8128 }
8129 break;
8130 }
8131 #endif
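/* For 32-bit ABIs, fcntl64 lock requests are converted field by
 * field between the guest's struct flock64 layout and the host's,
 * since the alignment and padding of the 64-bit l_start/l_len fields
 * can differ between ABIs; ARM additionally distinguishes the EABI
 * layout (target_eabi_flock64) at runtime via the eabi flag.
 */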
8132 #ifdef TARGET_NR_cacheflush
8133 case TARGET_NR_cacheflush:
8134 /* self-modifying code is handled automatically, so nothing needed */
8135 ret = 0;
8136 break;
8137 #endif
8138 #ifdef TARGET_NR_security
8139 case TARGET_NR_security:
8140 goto unimplemented;
8141 #endif
8142 #ifdef TARGET_NR_getpagesize
8143 case TARGET_NR_getpagesize:
8144 ret = TARGET_PAGE_SIZE;
8145 break;
8146 #endif
8147 case TARGET_NR_gettid:
8148 ret = get_errno(gettid());
8149 break;
8150 #ifdef TARGET_NR_readahead
8151 case TARGET_NR_readahead:
8152 #if TARGET_ABI_BITS == 32
8153 if (regpairs_aligned(cpu_env)) {
8154 arg2 = arg3;
8155 arg3 = arg4;
8156 arg4 = arg5;
8157 }
8158 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8159 #else
8160 ret = get_errno(readahead(arg1, arg2, arg3));
8161 #endif
8162 break;
8163 #endif
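/* On 32-bit ABIs whose calling convention puts 64-bit syscall
 * arguments in aligned register pairs (regpairs_aligned()), the
 * readahead offset starts one argument slot later, so the arguments
 * are shifted down before the 64-bit offset is reassembled from its
 * two halves.
 */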
8164 #ifdef CONFIG_ATTR
8165 #ifdef TARGET_NR_setxattr
8166 case TARGET_NR_listxattr:
8167 case TARGET_NR_llistxattr:
8168 {
8169 void *p, *b = 0;
8170 if (arg2) {
8171 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8172 if (!b) {
8173 ret = -TARGET_EFAULT;
8174 break;
8175 }
8176 }
8177 p = lock_user_string(arg1);
8178 if (p) {
8179 if (num == TARGET_NR_listxattr) {
8180 ret = get_errno(listxattr(p, b, arg3));
8181 } else {
8182 ret = get_errno(llistxattr(p, b, arg3));
8183 }
8184 } else {
8185 ret = -TARGET_EFAULT;
8186 }
8187 unlock_user(p, arg1, 0);
8188 unlock_user(b, arg2, arg3);
8189 break;
8190 }
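/* In the listxattr cases above (and the getxattr cases below) a NULL
 * guest buffer is passed straight through to the host call, which
 * preserves the usual xattr idiom of calling with a zero-sized buffer
 * to discover how much space is required.
 */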
8191 case TARGET_NR_flistxattr:
8192 {
8193 void *b = 0;
8194 if (arg2) {
8195 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8196 if (!b) {
8197 ret = -TARGET_EFAULT;
8198 break;
8199 }
8200 }
8201 ret = get_errno(flistxattr(arg1, b, arg3));
8202 unlock_user(b, arg2, arg3);
8203 break;
8204 }
8205 case TARGET_NR_setxattr:
8206 case TARGET_NR_lsetxattr:
8207 {
8208 void *p, *n, *v = 0;
8209 if (arg3) {
8210 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8211 if (!v) {
8212 ret = -TARGET_EFAULT;
8213 break;
8214 }
8215 }
8216 p = lock_user_string(arg1);
8217 n = lock_user_string(arg2);
8218 if (p && n) {
8219 if (num == TARGET_NR_setxattr) {
8220 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8221 } else {
8222 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8223 }
8224 } else {
8225 ret = -TARGET_EFAULT;
8226 }
8227 unlock_user(p, arg1, 0);
8228 unlock_user(n, arg2, 0);
8229 unlock_user(v, arg3, 0);
8230 }
8231 break;
8232 case TARGET_NR_fsetxattr:
8233 {
8234 void *n, *v = 0;
8235 if (arg3) {
8236 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8237 if (!v) {
8238 ret = -TARGET_EFAULT;
8239 break;
8240 }
8241 }
8242 n = lock_user_string(arg2);
8243 if (n) {
8244 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8245 } else {
8246 ret = -TARGET_EFAULT;
8247 }
8248 unlock_user(n, arg2, 0);
8249 unlock_user(v, arg3, 0);
8250 }
8251 break;
8252 case TARGET_NR_getxattr:
8253 case TARGET_NR_lgetxattr:
8254 {
8255 void *p, *n, *v = 0;
8256 if (arg3) {
8257 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8258 if (!v) {
8259 ret = -TARGET_EFAULT;
8260 break;
8261 }
8262 }
8263 p = lock_user_string(arg1);
8264 n = lock_user_string(arg2);
8265 if (p && n) {
8266 if (num == TARGET_NR_getxattr) {
8267 ret = get_errno(getxattr(p, n, v, arg4));
8268 } else {
8269 ret = get_errno(lgetxattr(p, n, v, arg4));
8270 }
8271 } else {
8272 ret = -TARGET_EFAULT;
8273 }
8274 unlock_user(p, arg1, 0);
8275 unlock_user(n, arg2, 0);
8276 unlock_user(v, arg3, arg4);
8277 }
8278 break;
8279 case TARGET_NR_fgetxattr:
8280 {
8281 void *n, *v = 0;
8282 if (arg3) {
8283 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8284 if (!v) {
8285 ret = -TARGET_EFAULT;
8286 break;
8287 }
8288 }
8289 n = lock_user_string(arg2);
8290 if (n) {
8291 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8292 } else {
8293 ret = -TARGET_EFAULT;
8294 }
8295 unlock_user(n, arg2, 0);
8296 unlock_user(v, arg3, arg4);
8297 }
8298 break;
8299 case TARGET_NR_removexattr:
8300 case TARGET_NR_lremovexattr:
8301 {
8302 void *p, *n;
8303 p = lock_user_string(arg1);
8304 n = lock_user_string(arg2);
8305 if (p && n) {
8306 if (num == TARGET_NR_removexattr) {
8307 ret = get_errno(removexattr(p, n));
8308 } else {
8309 ret = get_errno(lremovexattr(p, n));
8310 }
8311 } else {
8312 ret = -TARGET_EFAULT;
8313 }
8314 unlock_user(p, arg1, 0);
8315 unlock_user(n, arg2, 0);
8316 }
8317 break;
8318 case TARGET_NR_fremovexattr:
8319 {
8320 void *n;
8321 n = lock_user_string(arg2);
8322 if (n) {
8323 ret = get_errno(fremovexattr(arg1, n));
8324 } else {
8325 ret = -TARGET_EFAULT;
8326 }
8327 unlock_user(n, arg2, 0);
8328 }
8329 break;
8330 #endif
8331 #endif /* CONFIG_ATTR */
8332 #ifdef TARGET_NR_set_thread_area
8333 case TARGET_NR_set_thread_area:
8334 #if defined(TARGET_MIPS)
8335 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8336 ret = 0;
8337 break;
8338 #elif defined(TARGET_CRIS)
8339 if (arg1 & 0xff)
8340 ret = -TARGET_EINVAL;
8341 else {
8342 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8343 ret = 0;
8344 }
8345 break;
8346 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8347 ret = do_set_thread_area(cpu_env, arg1);
8348 break;
8349 #else
8350 goto unimplemented_nowarn;
8351 #endif
8352 #endif
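/* set_thread_area is handled per target: MIPS records the TLS
 * pointer in the CPU state (tls_value), CRIS stores it in the PR_PID
 * control register (the low 8 bits must be zero), and 32-bit x86
 * delegates to do_set_thread_area(); all other targets fall through
 * to ENOSYS without a warning.
 */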
8353 #ifdef TARGET_NR_get_thread_area
8354 case TARGET_NR_get_thread_area:
8355 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8356 ret = do_get_thread_area(cpu_env, arg1);
8357 #else
8358 goto unimplemented_nowarn;
8359 #endif
8360 #endif
8361 #ifdef TARGET_NR_getdomainname
8362 case TARGET_NR_getdomainname:
8363 goto unimplemented_nowarn;
8364 #endif
8365
8366 #ifdef TARGET_NR_clock_gettime
8367 case TARGET_NR_clock_gettime:
8368 {
8369 struct timespec ts;
8370 ret = get_errno(clock_gettime(arg1, &ts));
8371 if (!is_error(ret)) {
8372 host_to_target_timespec(arg2, &ts);
8373 }
8374 break;
8375 }
8376 #endif
8377 #ifdef TARGET_NR_clock_getres
8378 case TARGET_NR_clock_getres:
8379 {
8380 struct timespec ts;
8381 ret = get_errno(clock_getres(arg1, &ts));
8382 if (!is_error(ret)) {
8383 host_to_target_timespec(arg2, &ts);
8384 }
8385 break;
8386 }
8387 #endif
8388 #ifdef TARGET_NR_clock_nanosleep
8389 case TARGET_NR_clock_nanosleep:
8390 {
8391 struct timespec ts;
8392 target_to_host_timespec(&ts, arg3);
8393 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8394 if (arg4)
8395 host_to_target_timespec(arg4, &ts);
8396 break;
8397 }
8398 #endif
8399
8400 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8401 case TARGET_NR_set_tid_address:
8402 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8403 break;
8404 #endif
8405
8406 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8407 case TARGET_NR_tkill:
8408 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8409 break;
8410 #endif
8411
8412 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8413 case TARGET_NR_tgkill:
8414 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8415 target_to_host_signal(arg3)));
8416 break;
8417 #endif
8418
8419 #ifdef TARGET_NR_set_robust_list
8420 case TARGET_NR_set_robust_list:
8421 goto unimplemented_nowarn;
8422 #endif
8423
8424 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8425 case TARGET_NR_utimensat:
8426 {
8427 struct timespec *tsp, ts[2];
8428 if (!arg3) {
8429 tsp = NULL;
8430 } else {
8431 target_to_host_timespec(ts, arg3);
8432 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8433 tsp = ts;
8434 }
8435 if (!arg2)
8436 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8437 else {
8438 if (!(p = lock_user_string(arg2))) {
8439 ret = -TARGET_EFAULT;
8440 goto fail;
8441 }
8442 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8443 unlock_user(p, arg2, 0);
8444 }
8445 }
8446 break;
8447 #endif
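/* For utimensat a NULL arg3 is passed through as a NULL timespec
 * array, which utimensat(2) treats as "set both timestamps to the
 * current time", and a NULL arg2 pathname is passed through as well,
 * which is the form glibc typically uses to implement futimens().
 */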
8448 #if defined(CONFIG_USE_NPTL)
8449 case TARGET_NR_futex:
8450 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8451 break;
8452 #endif
8453 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8454 case TARGET_NR_inotify_init:
8455 ret = get_errno(sys_inotify_init());
8456 break;
8457 #endif
8458 #ifdef CONFIG_INOTIFY1
8459 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8460 case TARGET_NR_inotify_init1:
8461 ret = get_errno(sys_inotify_init1(arg1));
8462 break;
8463 #endif
8464 #endif
8465 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8466 case TARGET_NR_inotify_add_watch:
8467 p = lock_user_string(arg2);
8468 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8469 unlock_user(p, arg2, 0);
8470 break;
8471 #endif
8472 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8473 case TARGET_NR_inotify_rm_watch:
8474 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8475 break;
8476 #endif
8477
8478 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8479 case TARGET_NR_mq_open:
8480 {
8481 struct mq_attr posix_mq_attr;
8482
8483 p = lock_user_string(arg1 - 1);
8484 if (arg4 != 0)
8485 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8486 ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL));
8487 unlock_user (p, arg1, 0);
8488 }
8489 break;
8490
8491 case TARGET_NR_mq_unlink:
8492 p = lock_user_string(arg1 - 1);
8493 ret = get_errno(mq_unlink(p));
8494 unlock_user (p, arg1, 0);
8495 break;
8496
8497 case TARGET_NR_mq_timedsend:
8498 {
8499 struct timespec ts;
8500
8501 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8502 if (arg5 != 0) {
8503 target_to_host_timespec(&ts, arg5);
8504 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8505 host_to_target_timespec(arg5, &ts);
8506 }
8507 else
8508 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8509 unlock_user (p, arg2, arg3);
8510 }
8511 break;
8512
8513 case TARGET_NR_mq_timedreceive:
8514 {
8515 struct timespec ts;
8516 unsigned int prio;
8517
8518 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8519 if (arg5 != 0) {
8520 target_to_host_timespec(&ts, arg5);
8521 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8522 host_to_target_timespec(arg5, &ts);
8523 }
8524 else
8525 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8526 unlock_user (p, arg2, arg3);
8527 if (arg4 != 0)
8528 put_user_u32(prio, arg4);
8529 }
8530 break;
8531
8532 /* Not implemented for now... */
8533 /* case TARGET_NR_mq_notify: */
8534 /* break; */
8535
8536 case TARGET_NR_mq_getsetattr:
8537 {
8538 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8539 ret = 0;
8540 if (arg3 != 0) {
8541 ret = mq_getattr(arg1, &posix_mq_attr_out);
8542 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8543 }
8544 if (arg2 != 0) {
8545 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8546 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8547 }
8548
8549 }
8550 break;
8551 #endif
8552
8553 #ifdef CONFIG_SPLICE
8554 #ifdef TARGET_NR_tee
8555 case TARGET_NR_tee:
8556 {
8557 ret = get_errno(tee(arg1, arg2, arg3, arg4));
8558 }
8559 break;
8560 #endif
8561 #ifdef TARGET_NR_splice
8562 case TARGET_NR_splice:
8563 {
8564 loff_t loff_in, loff_out;
8565 loff_t *ploff_in = NULL, *ploff_out = NULL;
8566 if (arg2) {
8567 get_user_u64(loff_in, arg2);
8568 ploff_in = &loff_in;
8569 }
8570 if (arg4) {
8571 get_user_u64(loff_out, arg4);
8572 ploff_out = &loff_out;
8573 }
8574 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8575 }
8576 break;
8577 #endif
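/* Note that splice(2) updates the offsets it is given; the values
 * read into loff_in/loff_out above are not written back to guest
 * memory, so the guest does not see the updated offsets.
 */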
8578 #ifdef TARGET_NR_vmsplice
8579 case TARGET_NR_vmsplice:
8580 {
8581 int count = arg3;
8582 struct iovec *vec;
8583
8584 vec = alloca(count * sizeof(struct iovec));
8585 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
8586 goto efault;
8587 ret = get_errno(vmsplice(arg1, vec, count, arg4));
8588 unlock_iovec(vec, arg2, count, 0);
8589 }
8590 break;
8591 #endif
8592 #endif /* CONFIG_SPLICE */
8593 #ifdef CONFIG_EVENTFD
8594 #if defined(TARGET_NR_eventfd)
8595 case TARGET_NR_eventfd:
8596 ret = get_errno(eventfd(arg1, 0));
8597 break;
8598 #endif
8599 #if defined(TARGET_NR_eventfd2)
8600 case TARGET_NR_eventfd2:
8601 ret = get_errno(eventfd(arg1, arg2));
8602 break;
8603 #endif
8604 #endif /* CONFIG_EVENTFD */
8605 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8606 case TARGET_NR_fallocate:
8607 #if TARGET_ABI_BITS == 32
8608 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8609 target_offset64(arg5, arg6)));
8610 #else
8611 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8612 #endif
8613 break;
8614 #endif
8615 #if defined(CONFIG_SYNC_FILE_RANGE)
8616 #if defined(TARGET_NR_sync_file_range)
8617 case TARGET_NR_sync_file_range:
8618 #if TARGET_ABI_BITS == 32
8619 #if defined(TARGET_MIPS)
8620 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8621 target_offset64(arg5, arg6), arg7));
8622 #else
8623 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8624 target_offset64(arg4, arg5), arg6));
8625 #endif /* !TARGET_MIPS */
8626 #else
8627 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8628 #endif
8629 break;
8630 #endif
8631 #if defined(TARGET_NR_sync_file_range2)
8632 case TARGET_NR_sync_file_range2:
8633 /* This is like sync_file_range but the arguments are reordered */
8634 #if TARGET_ABI_BITS == 32
8635 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8636 target_offset64(arg5, arg6), arg2));
8637 #else
8638 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8639 #endif
8640 break;
8641 #endif
8642 #endif
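/* On 32-bit ABIs the 64-bit offset and length are reassembled from
 * two registers each with target_offset64(); MIPS passes them one
 * argument slot later (presumably because of its register-pair
 * alignment rules), and sync_file_range2 moves the flags argument up
 * to arg2 so the 64-bit values can stay register-pair aligned.
 */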
8643 #if defined(CONFIG_EPOLL)
8644 #if defined(TARGET_NR_epoll_create)
8645 case TARGET_NR_epoll_create:
8646 ret = get_errno(epoll_create(arg1));
8647 break;
8648 #endif
8649 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8650 case TARGET_NR_epoll_create1:
8651 ret = get_errno(epoll_create1(arg1));
8652 break;
8653 #endif
8654 #if defined(TARGET_NR_epoll_ctl)
8655 case TARGET_NR_epoll_ctl:
8656 {
8657 struct epoll_event ep;
8658 struct epoll_event *epp = 0;
8659 if (arg4) {
8660 struct target_epoll_event *target_ep;
8661 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8662 goto efault;
8663 }
8664 ep.events = tswap32(target_ep->events);
8665 /* The epoll_data_t union is just opaque data to the kernel,
8666 * so we transfer all 64 bits across and need not worry what
8667 * actual data type it is.
8668 */
8669 ep.data.u64 = tswap64(target_ep->data.u64);
8670 unlock_user_struct(target_ep, arg4, 0);
8671 epp = &ep;
8672 }
8673 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8674 break;
8675 }
8676 #endif
8677
8678 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8679 #define IMPLEMENT_EPOLL_PWAIT
8680 #endif
8681 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8682 #if defined(TARGET_NR_epoll_wait)
8683 case TARGET_NR_epoll_wait:
8684 #endif
8685 #if defined(IMPLEMENT_EPOLL_PWAIT)
8686 case TARGET_NR_epoll_pwait:
8687 #endif
8688 {
8689 struct target_epoll_event *target_ep;
8690 struct epoll_event *ep;
8691 int epfd = arg1;
8692 int maxevents = arg3;
8693 int timeout = arg4;
8694
8695 target_ep = lock_user(VERIFY_WRITE, arg2,
8696 maxevents * sizeof(struct target_epoll_event), 1);
8697 if (!target_ep) {
8698 goto efault;
8699 }
8700
8701 ep = alloca(maxevents * sizeof(struct epoll_event));
8702
8703 switch (num) {
8704 #if defined(IMPLEMENT_EPOLL_PWAIT)
8705 case TARGET_NR_epoll_pwait:
8706 {
8707 target_sigset_t *target_set;
8708 sigset_t _set, *set = &_set;
8709
8710 if (arg5) {
8711 target_set = lock_user(VERIFY_READ, arg5,
8712 sizeof(target_sigset_t), 1);
8713 if (!target_set) {
8714 unlock_user(target_ep, arg2, 0);
8715 goto efault;
8716 }
8717 target_to_host_sigset(set, target_set);
8718 unlock_user(target_set, arg5, 0);
8719 } else {
8720 set = NULL;
8721 }
8722
8723 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8724 break;
8725 }
8726 #endif
8727 #if defined(TARGET_NR_epoll_wait)
8728 case TARGET_NR_epoll_wait:
8729 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8730 break;
8731 #endif
8732 default:
8733 ret = -TARGET_ENOSYS;
8734 }
8735 if (!is_error(ret)) {
8736 int i;
8737 for (i = 0; i < ret; i++) {
8738 target_ep[i].events = tswap32(ep[i].events);
8739 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8740 }
8741 }
8742 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8743 break;
8744 }
8745 #endif
8746 #endif
8747 #ifdef TARGET_NR_prlimit64
8748 case TARGET_NR_prlimit64:
8749 {
8750 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8751 struct target_rlimit64 *target_rnew, *target_rold;
8752 struct host_rlimit64 rnew, rold, *rnewp = 0;
8753 if (arg3) {
8754 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8755 goto efault;
8756 }
8757 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8758 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8759 unlock_user_struct(target_rnew, arg3, 0);
8760 rnewp = &rnew;
8761 }
8762
8763 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8764 if (!is_error(ret) && arg4) {
8765 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8766 goto efault;
8767 }
8768 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8769 target_rold->rlim_max = tswap64(rold.rlim_max);
8770 unlock_user_struct(target_rold, arg4, 1);
8771 }
8772 break;
8773 }
8774 #endif
8775 default:
8776 unimplemented:
8777 gemu_log("qemu: Unsupported syscall: %d\n", num);
8778 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8779 unimplemented_nowarn:
8780 #endif
8781 ret = -TARGET_ENOSYS;
8782 break;
8783 }
8784 fail:
8785 #ifdef DEBUG
8786 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8787 #endif
8788 if(do_strace)
8789 print_syscall_ret(num, ret);
8790 return ret;
8791 efault:
8792 ret = -TARGET_EFAULT;
8793 goto fail;
8794 }