qemu.git - linux-user/syscall.c
linux-user: Define AT_RANDOM to support target stack protection mechanism.
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include <qemu-common.h>
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
73
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
80
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/utsname.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #include <linux/vt.h>
95 #include "linux_loop.h"
96 #include "cpu-uname.h"
97
98 #include "qemu.h"
99 #include "qemu-common.h"
100
101 #if defined(CONFIG_USE_NPTL)
102 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
103 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
104 #else
105 /* XXX: Hardcode the above values. */
106 #define CLONE_NPTL_FLAGS2 0
107 #endif
108
109 //#define DEBUG
110
111 //#include <linux/msdos_fs.h>
112 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
113 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
114
115
116 #undef _syscall0
117 #undef _syscall1
118 #undef _syscall2
119 #undef _syscall3
120 #undef _syscall4
121 #undef _syscall5
122 #undef _syscall6
123
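/* These macros expand to small static wrappers that invoke the host
 * syscall directly via syscall(2), so the emulator does not depend on
 * the host C library providing a wrapper for every syscall it needs. */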
124 #define _syscall0(type,name) \
125 static type name (void) \
126 { \
127 return syscall(__NR_##name); \
128 }
129
130 #define _syscall1(type,name,type1,arg1) \
131 static type name (type1 arg1) \
132 { \
133 return syscall(__NR_##name, arg1); \
134 }
135
136 #define _syscall2(type,name,type1,arg1,type2,arg2) \
137 static type name (type1 arg1,type2 arg2) \
138 { \
139 return syscall(__NR_##name, arg1, arg2); \
140 }
141
142 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
143 static type name (type1 arg1,type2 arg2,type3 arg3) \
144 { \
145 return syscall(__NR_##name, arg1, arg2, arg3); \
146 }
147
148 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
149 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
150 { \
151 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
152 }
153
154 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
155 type5,arg5) \
156 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
157 { \
158 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
159 }
160
161
162 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5,type6,arg6) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
165 type6 arg6) \
166 { \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
168 }
169
170
171 #define __NR_sys_uname __NR_uname
172 #define __NR_sys_faccessat __NR_faccessat
173 #define __NR_sys_fchmodat __NR_fchmodat
174 #define __NR_sys_fchownat __NR_fchownat
175 #define __NR_sys_fstatat64 __NR_fstatat64
176 #define __NR_sys_futimesat __NR_futimesat
177 #define __NR_sys_getcwd1 __NR_getcwd
178 #define __NR_sys_getdents __NR_getdents
179 #define __NR_sys_getdents64 __NR_getdents64
180 #define __NR_sys_getpriority __NR_getpriority
181 #define __NR_sys_linkat __NR_linkat
182 #define __NR_sys_mkdirat __NR_mkdirat
183 #define __NR_sys_mknodat __NR_mknodat
184 #define __NR_sys_newfstatat __NR_newfstatat
185 #define __NR_sys_openat __NR_openat
186 #define __NR_sys_readlinkat __NR_readlinkat
187 #define __NR_sys_renameat __NR_renameat
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_symlinkat __NR_symlinkat
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_unlinkat __NR_unlinkat
194 #define __NR_sys_utimensat __NR_utimensat
195 #define __NR_sys_futex __NR_futex
196 #define __NR_sys_inotify_init __NR_inotify_init
197 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
198 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
199
200 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
201 defined(__s390x__)
202 #define __NR__llseek __NR_lseek
203 #endif
204
205 #ifdef __NR_gettid
206 _syscall0(int, gettid)
207 #else
208 /* This is a replacement for the host gettid() and must return a host
209 errno. */
210 static int gettid(void) {
211 return -ENOSYS;
212 }
213 #endif
214 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
215 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
216 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
217 #endif
218 _syscall2(int, sys_getpriority, int, which, int, who);
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
221 loff_t *, res, uint, wh);
222 #endif
223 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
224 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
225 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
226 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
227 #endif
228 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
229 _syscall2(int,sys_tkill,int,tid,int,sig)
230 #endif
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
233 #endif
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
236 #endif
237 #if defined(CONFIG_USE_NPTL)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
240 const struct timespec *,timeout,int *,uaddr2,int,val3)
241 #endif
242 #endif
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
249
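/* Translation table between target and host open(2)/fcntl(2) flag bits. */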
250 static bitmask_transtbl fcntl_flags_tbl[] = {
251 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
252 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
253 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
254 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
255 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
256 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
257 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
258 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
259 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
260 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
261 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
262 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
263 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
264 #if defined(O_DIRECT)
265 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
266 #endif
267 { 0, 0, 0, 0 }
268 };
269
270 #define COPY_UTSNAME_FIELD(dest, src) \
271 do { \
272 /* __NEW_UTS_LEN doesn't include terminating null */ \
273 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
274 (dest)[__NEW_UTS_LEN] = '\0'; \
275 } while (0)
276
277 static int sys_uname(struct new_utsname *buf)
278 {
279 struct utsname uts_buf;
280
281 if (uname(&uts_buf) < 0)
282 return (-1);
283
284 /*
285 * Just in case these have some differences, we
286 * translate utsname to new_utsname (which is the
287 * struct the Linux kernel uses).
288 */
289
290 memset(buf, 0, sizeof(*buf));
291 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
292 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
293 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
294 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
295 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
296 #ifdef _GNU_SOURCE
297 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
298 #endif
299 return (0);
300
301 #undef COPY_UTSNAME_FIELD
302 }
303
304 static int sys_getcwd1(char *buf, size_t size)
305 {
306 if (getcwd(buf, size) == NULL) {
307 /* getcwd() sets errno */
308 return (-1);
309 }
310 return strlen(buf)+1;
311 }
312
313 #ifdef CONFIG_ATFILE
314 /*
315 * The host system appears to provide the *at() syscall stubs. We
316 * enable them one by one, as specified by the target's syscall_nr.h.
317 */
318
319 #ifdef TARGET_NR_faccessat
320 static int sys_faccessat(int dirfd, const char *pathname, int mode)
321 {
322 return (faccessat(dirfd, pathname, mode, 0));
323 }
324 #endif
325 #ifdef TARGET_NR_fchmodat
326 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
327 {
328 return (fchmodat(dirfd, pathname, mode, 0));
329 }
330 #endif
331 #if defined(TARGET_NR_fchownat)
332 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
333 gid_t group, int flags)
334 {
335 return (fchownat(dirfd, pathname, owner, group, flags));
336 }
337 #endif
338 #ifdef __NR_fstatat64
339 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
340 int flags)
341 {
342 return (fstatat(dirfd, pathname, buf, flags));
343 }
344 #endif
345 #ifdef __NR_newfstatat
346 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
347 int flags)
348 {
349 return (fstatat(dirfd, pathname, buf, flags));
350 }
351 #endif
352 #ifdef TARGET_NR_futimesat
353 static int sys_futimesat(int dirfd, const char *pathname,
354 const struct timeval times[2])
355 {
356 return (futimesat(dirfd, pathname, times));
357 }
358 #endif
359 #ifdef TARGET_NR_linkat
360 static int sys_linkat(int olddirfd, const char *oldpath,
361 int newdirfd, const char *newpath, int flags)
362 {
363 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
364 }
365 #endif
366 #ifdef TARGET_NR_mkdirat
367 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
368 {
369 return (mkdirat(dirfd, pathname, mode));
370 }
371 #endif
372 #ifdef TARGET_NR_mknodat
373 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
374 dev_t dev)
375 {
376 return (mknodat(dirfd, pathname, mode, dev));
377 }
378 #endif
379 #ifdef TARGET_NR_openat
380 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
381 {
382 /*
383 * open(2) has extra parameter 'mode' when called with
384 * flag O_CREAT.
385 */
386 if ((flags & O_CREAT) != 0) {
387 va_list ap;
388 mode_t mode;
389
390 /*
391 * Get the 'mode' parameter and translate it to
392 * host bits.
393 */
394 va_start(ap, flags);
395 mode = va_arg(ap, mode_t);
396 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
397 va_end(ap);
398
399 return (openat(dirfd, pathname, flags, mode));
400 }
401 return (openat(dirfd, pathname, flags));
402 }
403 #endif
404 #ifdef TARGET_NR_readlinkat
405 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
406 {
407 return (readlinkat(dirfd, pathname, buf, bufsiz));
408 }
409 #endif
410 #ifdef TARGET_NR_renameat
411 static int sys_renameat(int olddirfd, const char *oldpath,
412 int newdirfd, const char *newpath)
413 {
414 return (renameat(olddirfd, oldpath, newdirfd, newpath));
415 }
416 #endif
417 #ifdef TARGET_NR_symlinkat
418 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
419 {
420 return (symlinkat(oldpath, newdirfd, newpath));
421 }
422 #endif
423 #ifdef TARGET_NR_unlinkat
424 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
425 {
426 return (unlinkat(dirfd, pathname, flags));
427 }
428 #endif
429 #else /* !CONFIG_ATFILE */
430
431 /*
432 * Try direct syscalls instead
433 */
434 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
435 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
436 #endif
437 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
438 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
439 #endif
440 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
441 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
442 uid_t,owner,gid_t,group,int,flags)
443 #endif
444 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
445 defined(__NR_fstatat64)
446 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
447 struct stat *,buf,int,flags)
448 #endif
449 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
450 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
451 const struct timeval *,times)
452 #endif
453 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
454 defined(__NR_newfstatat)
455 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
456 struct stat *,buf,int,flags)
457 #endif
458 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
459 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
460 int,newdirfd,const char *,newpath,int,flags)
461 #endif
462 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
463 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
464 #endif
465 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
466 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
467 mode_t,mode,dev_t,dev)
468 #endif
469 #if defined(TARGET_NR_openat) && defined(__NR_openat)
470 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
471 #endif
472 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
473 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
474 char *,buf,size_t,bufsize)
475 #endif
476 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
477 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
478 int,newdirfd,const char *,newpath)
479 #endif
480 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
481 _syscall3(int,sys_symlinkat,const char *,oldpath,
482 int,newdirfd,const char *,newpath)
483 #endif
484 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
485 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
486 #endif
487
488 #endif /* CONFIG_ATFILE */
489
490 #ifdef CONFIG_UTIMENSAT
491 static int sys_utimensat(int dirfd, const char *pathname,
492 const struct timespec times[2], int flags)
493 {
494 if (pathname == NULL)
495 return futimens(dirfd, times);
496 else
497 return utimensat(dirfd, pathname, times, flags);
498 }
499 #else
500 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
501 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
502 const struct timespec *,tsp,int,flags)
503 #endif
504 #endif /* CONFIG_UTIMENSAT */
505
506 #ifdef CONFIG_INOTIFY
507 #include <sys/inotify.h>
508
509 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
510 static int sys_inotify_init(void)
511 {
512 return (inotify_init());
513 }
514 #endif
515 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
516 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
517 {
518 return (inotify_add_watch(fd, pathname, mask));
519 }
520 #endif
521 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
522 static int sys_inotify_rm_watch(int fd, int32_t wd)
523 {
524 return (inotify_rm_watch(fd, wd));
525 }
526 #endif
527 #ifdef CONFIG_INOTIFY1
528 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
529 static int sys_inotify_init1(int flags)
530 {
531 return (inotify_init1(flags));
532 }
533 #endif
534 #endif
535 #else
536 /* Userspace can usually survive without inotify at runtime */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY */
542
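/* For ppoll/pselect6, __NR_ppoll/__NR_pselect6 are forced to -1 when the
 * host kernel headers lack them, so the wrappers still compile and the
 * calls simply fail with ENOSYS at runtime. */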
543 #if defined(TARGET_NR_ppoll)
544 #ifndef __NR_ppoll
545 # define __NR_ppoll -1
546 #endif
547 #define __NR_sys_ppoll __NR_ppoll
548 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
549 struct timespec *, timeout, const __sigset_t *, sigmask,
550 size_t, sigsetsize)
551 #endif
552
553 #if defined(TARGET_NR_pselect6)
554 #ifndef __NR_pselect6
555 # define __NR_pselect6 -1
556 #endif
557 #define __NR_sys_pselect6 __NR_pselect6
558 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
559 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
560 #endif
561
562 extern int personality(int);
563 extern int flock(int, int);
564 extern int setfsuid(int);
565 extern int setfsgid(int);
566 extern int setgroups(int, gid_t *);
567
568 #define ERRNO_TABLE_SIZE 1200
569
570 /* target_to_host_errno_table[] is initialized from
571 * host_to_target_errno_table[] in syscall_init(). */
572 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
573 };
574
575 /*
576 * This list is the union of errno values overridden in asm-<arch>/errno.h
577 * minus the errnos that are not actually generic to all archs.
578 */
579 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
580 [EIDRM] = TARGET_EIDRM,
581 [ECHRNG] = TARGET_ECHRNG,
582 [EL2NSYNC] = TARGET_EL2NSYNC,
583 [EL3HLT] = TARGET_EL3HLT,
584 [EL3RST] = TARGET_EL3RST,
585 [ELNRNG] = TARGET_ELNRNG,
586 [EUNATCH] = TARGET_EUNATCH,
587 [ENOCSI] = TARGET_ENOCSI,
588 [EL2HLT] = TARGET_EL2HLT,
589 [EDEADLK] = TARGET_EDEADLK,
590 [ENOLCK] = TARGET_ENOLCK,
591 [EBADE] = TARGET_EBADE,
592 [EBADR] = TARGET_EBADR,
593 [EXFULL] = TARGET_EXFULL,
594 [ENOANO] = TARGET_ENOANO,
595 [EBADRQC] = TARGET_EBADRQC,
596 [EBADSLT] = TARGET_EBADSLT,
597 [EBFONT] = TARGET_EBFONT,
598 [ENOSTR] = TARGET_ENOSTR,
599 [ENODATA] = TARGET_ENODATA,
600 [ETIME] = TARGET_ETIME,
601 [ENOSR] = TARGET_ENOSR,
602 [ENONET] = TARGET_ENONET,
603 [ENOPKG] = TARGET_ENOPKG,
604 [EREMOTE] = TARGET_EREMOTE,
605 [ENOLINK] = TARGET_ENOLINK,
606 [EADV] = TARGET_EADV,
607 [ESRMNT] = TARGET_ESRMNT,
608 [ECOMM] = TARGET_ECOMM,
609 [EPROTO] = TARGET_EPROTO,
610 [EDOTDOT] = TARGET_EDOTDOT,
611 [EMULTIHOP] = TARGET_EMULTIHOP,
612 [EBADMSG] = TARGET_EBADMSG,
613 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
614 [EOVERFLOW] = TARGET_EOVERFLOW,
615 [ENOTUNIQ] = TARGET_ENOTUNIQ,
616 [EBADFD] = TARGET_EBADFD,
617 [EREMCHG] = TARGET_EREMCHG,
618 [ELIBACC] = TARGET_ELIBACC,
619 [ELIBBAD] = TARGET_ELIBBAD,
620 [ELIBSCN] = TARGET_ELIBSCN,
621 [ELIBMAX] = TARGET_ELIBMAX,
622 [ELIBEXEC] = TARGET_ELIBEXEC,
623 [EILSEQ] = TARGET_EILSEQ,
624 [ENOSYS] = TARGET_ENOSYS,
625 [ELOOP] = TARGET_ELOOP,
626 [ERESTART] = TARGET_ERESTART,
627 [ESTRPIPE] = TARGET_ESTRPIPE,
628 [ENOTEMPTY] = TARGET_ENOTEMPTY,
629 [EUSERS] = TARGET_EUSERS,
630 [ENOTSOCK] = TARGET_ENOTSOCK,
631 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
632 [EMSGSIZE] = TARGET_EMSGSIZE,
633 [EPROTOTYPE] = TARGET_EPROTOTYPE,
634 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
635 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
636 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
637 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
638 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
639 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
640 [EADDRINUSE] = TARGET_EADDRINUSE,
641 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
642 [ENETDOWN] = TARGET_ENETDOWN,
643 [ENETUNREACH] = TARGET_ENETUNREACH,
644 [ENETRESET] = TARGET_ENETRESET,
645 [ECONNABORTED] = TARGET_ECONNABORTED,
646 [ECONNRESET] = TARGET_ECONNRESET,
647 [ENOBUFS] = TARGET_ENOBUFS,
648 [EISCONN] = TARGET_EISCONN,
649 [ENOTCONN] = TARGET_ENOTCONN,
650 [EUCLEAN] = TARGET_EUCLEAN,
651 [ENOTNAM] = TARGET_ENOTNAM,
652 [ENAVAIL] = TARGET_ENAVAIL,
653 [EISNAM] = TARGET_EISNAM,
654 [EREMOTEIO] = TARGET_EREMOTEIO,
655 [ESHUTDOWN] = TARGET_ESHUTDOWN,
656 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
657 [ETIMEDOUT] = TARGET_ETIMEDOUT,
658 [ECONNREFUSED] = TARGET_ECONNREFUSED,
659 [EHOSTDOWN] = TARGET_EHOSTDOWN,
660 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
661 [EALREADY] = TARGET_EALREADY,
662 [EINPROGRESS] = TARGET_EINPROGRESS,
663 [ESTALE] = TARGET_ESTALE,
664 [ECANCELED] = TARGET_ECANCELED,
665 [ENOMEDIUM] = TARGET_ENOMEDIUM,
666 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
667 #ifdef ENOKEY
668 [ENOKEY] = TARGET_ENOKEY,
669 #endif
670 #ifdef EKEYEXPIRED
671 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
672 #endif
673 #ifdef EKEYREVOKED
674 [EKEYREVOKED] = TARGET_EKEYREVOKED,
675 #endif
676 #ifdef EKEYREJECTED
677 [EKEYREJECTED] = TARGET_EKEYREJECTED,
678 #endif
679 #ifdef EOWNERDEAD
680 [EOWNERDEAD] = TARGET_EOWNERDEAD,
681 #endif
682 #ifdef ENOTRECOVERABLE
683 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
684 #endif
685 };
686
687 static inline int host_to_target_errno(int err)
688 {
689 if(host_to_target_errno_table[err])
690 return host_to_target_errno_table[err];
691 return err;
692 }
693
694 static inline int target_to_host_errno(int err)
695 {
696 if (target_to_host_errno_table[err])
697 return target_to_host_errno_table[err];
698 return err;
699 }
700
701 static inline abi_long get_errno(abi_long ret)
702 {
703 if (ret == -1)
704 return -host_to_target_errno(errno);
705 else
706 return ret;
707 }
708
709 static inline int is_error(abi_long ret)
710 {
711 return (abi_ulong)ret >= (abi_ulong)(-4096);
712 }
713
714 char *target_strerror(int err)
715 {
716 return strerror(target_to_host_errno(err));
717 }
718
719 static abi_ulong target_brk;
720 static abi_ulong target_original_brk;
721 static abi_ulong brk_page;
722
723 void target_set_brk(abi_ulong new_brk)
724 {
725 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
726 brk_page = HOST_PAGE_ALIGN(target_brk);
727 }
728
729 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
730 #define DEBUGF_BRK(message, args...)
731
732 /* do_brk() must return target values and target errnos. */
733 abi_long do_brk(abi_ulong new_brk)
734 {
735 abi_long mapped_addr;
736 int new_alloc_size;
737
738 DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);
739
740 if (!new_brk) {
741 DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
742 return target_brk;
743 }
744 if (new_brk < target_original_brk) {
745 DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
746 return target_brk;
747 }
748
749 /* If the new brk is less than the highest page reserved to the
750 * target heap allocation, set it and we're almost done... */
751 if (new_brk <= brk_page) {
752 /* Heap contents are initialized to zero, as for anonymous
753 * mapped pages. */
754 if (new_brk > target_brk) {
755 memset(g2h(target_brk), 0, new_brk - target_brk);
756 }
757 target_brk = new_brk;
758 DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
759 return target_brk;
760 }
761
762 /* We need to allocate more memory after the brk... Note that
763 * we don't use MAP_FIXED because that will map over the top of
764 * any existing mapping (like the one with the host libc or qemu
765 * itself); instead we treat "mapped but at wrong address" as
766 * a failure and unmap again.
767 */
768 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
769 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
770 PROT_READ|PROT_WRITE,
771 MAP_ANON|MAP_PRIVATE, 0, 0));
772
773 if (mapped_addr == brk_page) {
774 target_brk = new_brk;
775 brk_page = HOST_PAGE_ALIGN(target_brk);
776 DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
777 return target_brk;
778 } else if (mapped_addr != -1) {
779 /* Mapped but at wrong address, meaning there wasn't actually
780 * enough space for this brk.
781 */
782 target_munmap(mapped_addr, new_alloc_size);
783 mapped_addr = -1;
784 DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
785 }
786 else {
787 DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
788 }
789
790 #if defined(TARGET_ALPHA)
791 /* We (partially) emulate OSF/1 on Alpha, which requires we
792 return a proper errno, not an unchanged brk value. */
793 return -TARGET_ENOMEM;
794 #endif
795 /* For everything else, return the previous break. */
796 return target_brk;
797 }
798
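/* Helpers for converting fd_set bitmaps between guest and host layouts:
 * the guest stores them as arrays of abi_ulong words in target byte
 * order, so each bit is copied individually. */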
799 static inline abi_long copy_from_user_fdset(fd_set *fds,
800 abi_ulong target_fds_addr,
801 int n)
802 {
803 int i, nw, j, k;
804 abi_ulong b, *target_fds;
805
806 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
807 if (!(target_fds = lock_user(VERIFY_READ,
808 target_fds_addr,
809 sizeof(abi_ulong) * nw,
810 1)))
811 return -TARGET_EFAULT;
812
813 FD_ZERO(fds);
814 k = 0;
815 for (i = 0; i < nw; i++) {
816 /* grab the abi_ulong */
817 __get_user(b, &target_fds[i]);
818 for (j = 0; j < TARGET_ABI_BITS; j++) {
819 /* check the bit inside the abi_ulong */
820 if ((b >> j) & 1)
821 FD_SET(k, fds);
822 k++;
823 }
824 }
825
826 unlock_user(target_fds, target_fds_addr, 0);
827
828 return 0;
829 }
830
831 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
832 abi_ulong target_fds_addr,
833 int n)
834 {
835 if (target_fds_addr) {
836 if (copy_from_user_fdset(fds, target_fds_addr, n))
837 return -TARGET_EFAULT;
838 *fds_ptr = fds;
839 } else {
840 *fds_ptr = NULL;
841 }
842 return 0;
843 }
844
845 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
846 const fd_set *fds,
847 int n)
848 {
849 int i, nw, j, k;
850 abi_long v;
851 abi_ulong *target_fds;
852
853 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
854 if (!(target_fds = lock_user(VERIFY_WRITE,
855 target_fds_addr,
856 sizeof(abi_ulong) * nw,
857 0)))
858 return -TARGET_EFAULT;
859
860 k = 0;
861 for (i = 0; i < nw; i++) {
862 v = 0;
863 for (j = 0; j < TARGET_ABI_BITS; j++) {
864 v |= ((FD_ISSET(k, fds) != 0) << j);
865 k++;
866 }
867 __put_user(v, &target_fds[i]);
868 }
869
870 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
871
872 return 0;
873 }
874
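/* clock_t values returned by the host (e.g. from times(2)) are in units
 * of the host HZ and must be rescaled to the target HZ. */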
875 #if defined(__alpha__)
876 #define HOST_HZ 1024
877 #else
878 #define HOST_HZ 100
879 #endif
880
881 static inline abi_long host_to_target_clock_t(long ticks)
882 {
883 #if HOST_HZ == TARGET_HZ
884 return ticks;
885 #else
886 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
887 #endif
888 }
889
890 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
891 const struct rusage *rusage)
892 {
893 struct target_rusage *target_rusage;
894
895 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
896 return -TARGET_EFAULT;
897 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
898 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
899 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
900 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
901 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
902 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
903 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
904 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
905 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
906 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
907 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
908 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
909 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
910 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
911 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
912 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
913 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
914 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
915 unlock_user_struct(target_rusage, target_addr, 1);
916
917 return 0;
918 }
919
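/* rlim_t conversion: RLIM_INFINITY is translated explicitly, and host
 * limits that do not fit in a target_long are reported as infinity. */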
920 static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
921 {
922 if (target_rlim == TARGET_RLIM_INFINITY)
923 return RLIM_INFINITY;
924 else
925 return tswapl(target_rlim);
926 }
927
928 static inline target_ulong host_to_target_rlim(rlim_t rlim)
929 {
930 if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
931 return TARGET_RLIM_INFINITY;
932 else
933 return tswapl(rlim);
934 }
935
936 static inline abi_long copy_from_user_timeval(struct timeval *tv,
937 abi_ulong target_tv_addr)
938 {
939 struct target_timeval *target_tv;
940
941 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
942 return -TARGET_EFAULT;
943
944 __get_user(tv->tv_sec, &target_tv->tv_sec);
945 __get_user(tv->tv_usec, &target_tv->tv_usec);
946
947 unlock_user_struct(target_tv, target_tv_addr, 0);
948
949 return 0;
950 }
951
952 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
953 const struct timeval *tv)
954 {
955 struct target_timeval *target_tv;
956
957 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
958 return -TARGET_EFAULT;
959
960 __put_user(tv->tv_sec, &target_tv->tv_sec);
961 __put_user(tv->tv_usec, &target_tv->tv_usec);
962
963 unlock_user_struct(target_tv, target_tv_addr, 1);
964
965 return 0;
966 }
967
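/* POSIX message queue attribute conversion, compiled only when both the
 * target and the host support mq_open. */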
968 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
969 #include <mqueue.h>
970
971 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
972 abi_ulong target_mq_attr_addr)
973 {
974 struct target_mq_attr *target_mq_attr;
975
976 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
977 target_mq_attr_addr, 1))
978 return -TARGET_EFAULT;
979
980 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
981 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
982 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
983 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
984
985 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
986
987 return 0;
988 }
989
990 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
991 const struct mq_attr *attr)
992 {
993 struct target_mq_attr *target_mq_attr;
994
995 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
996 target_mq_attr_addr, 0))
997 return -TARGET_EFAULT;
998
999 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1000 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1001 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1002 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1003
1004 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1005
1006 return 0;
1007 }
1008 #endif
1009
1010 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1011 /* do_select() must return target values and target errnos. */
1012 static abi_long do_select(int n,
1013 abi_ulong rfd_addr, abi_ulong wfd_addr,
1014 abi_ulong efd_addr, abi_ulong target_tv_addr)
1015 {
1016 fd_set rfds, wfds, efds;
1017 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1018 struct timeval tv, *tv_ptr;
1019 abi_long ret;
1020
1021 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1022 if (ret) {
1023 return ret;
1024 }
1025 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1026 if (ret) {
1027 return ret;
1028 }
1029 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1030 if (ret) {
1031 return ret;
1032 }
1033
1034 if (target_tv_addr) {
1035 if (copy_from_user_timeval(&tv, target_tv_addr))
1036 return -TARGET_EFAULT;
1037 tv_ptr = &tv;
1038 } else {
1039 tv_ptr = NULL;
1040 }
1041
1042 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1043
1044 if (!is_error(ret)) {
1045 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1046 return -TARGET_EFAULT;
1047 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1048 return -TARGET_EFAULT;
1049 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1050 return -TARGET_EFAULT;
1051
1052 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1053 return -TARGET_EFAULT;
1054 }
1055
1056 return ret;
1057 }
1058 #endif
1059
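/* pipe2() is forwarded to the host only when CONFIG_PIPE2 is set;
 * otherwise -ENOSYS is returned. */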
1060 static abi_long do_pipe2(int host_pipe[], int flags)
1061 {
1062 #ifdef CONFIG_PIPE2
1063 return pipe2(host_pipe, flags);
1064 #else
1065 return -ENOSYS;
1066 #endif
1067 }
1068
1069 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1070 int flags, int is_pipe2)
1071 {
1072 int host_pipe[2];
1073 abi_long ret;
1074 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1075
1076 if (is_error(ret))
1077 return get_errno(ret);
1078
1079 /* Several targets have special calling conventions for the original
1080 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1081 if (!is_pipe2) {
1082 #if defined(TARGET_ALPHA)
1083 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1084 return host_pipe[0];
1085 #elif defined(TARGET_MIPS)
1086 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1087 return host_pipe[0];
1088 #elif defined(TARGET_SH4)
1089 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1090 return host_pipe[0];
1091 #endif
1092 }
1093
1094 if (put_user_s32(host_pipe[0], pipedes)
1095 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1096 return -TARGET_EFAULT;
1097 return get_errno(ret);
1098 }
1099
1100 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1101 abi_ulong target_addr,
1102 socklen_t len)
1103 {
1104 struct target_ip_mreqn *target_smreqn;
1105
1106 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1107 if (!target_smreqn)
1108 return -TARGET_EFAULT;
1109 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1110 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1111 if (len == sizeof(struct target_ip_mreqn))
1112 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
1113 unlock_user(target_smreqn, target_addr, 0);
1114
1115 return 0;
1116 }
1117
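/* Copy a sockaddr from guest memory into a host struct sockaddr,
 * byte-swapping sa_family and fixing up AF_UNIX path lengths. */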
1118 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1119 abi_ulong target_addr,
1120 socklen_t len)
1121 {
1122 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1123 sa_family_t sa_family;
1124 struct target_sockaddr *target_saddr;
1125
1126 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1127 if (!target_saddr)
1128 return -TARGET_EFAULT;
1129
1130 sa_family = tswap16(target_saddr->sa_family);
1131
1132 /* Oops. The caller might send an incomplete sun_path; sun_path
1133 * must be terminated by \0 (see the manual page), but
1134 * unfortunately it is quite common to specify sockaddr_un
1135 * length as "strlen(x->sun_path)" while it should be
1136 * "strlen(...) + 1". We'll fix that here if needed.
1137 * The Linux kernel applies a similar fixup.
1138 */
1139
1140 if (sa_family == AF_UNIX) {
1141 if (len < unix_maxlen && len > 0) {
1142 char *cp = (char*)target_saddr;
1143
1144 if ( cp[len-1] && !cp[len] )
1145 len++;
1146 }
1147 if (len > unix_maxlen)
1148 len = unix_maxlen;
1149 }
1150
1151 memcpy(addr, target_saddr, len);
1152 addr->sa_family = sa_family;
1153 unlock_user(target_saddr, target_addr, 0);
1154
1155 return 0;
1156 }
1157
1158 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1159 struct sockaddr *addr,
1160 socklen_t len)
1161 {
1162 struct target_sockaddr *target_saddr;
1163
1164 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1165 if (!target_saddr)
1166 return -TARGET_EFAULT;
1167 memcpy(target_saddr, addr, len);
1168 target_saddr->sa_family = tswap16(addr->sa_family);
1169 unlock_user(target_saddr, target_addr, len);
1170
1171 return 0;
1172 }
1173
1174 /* ??? Should this also swap msgh->name? */
1175 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1176 struct target_msghdr *target_msgh)
1177 {
1178 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1179 abi_long msg_controllen;
1180 abi_ulong target_cmsg_addr;
1181 struct target_cmsghdr *target_cmsg;
1182 socklen_t space = 0;
1183
1184 msg_controllen = tswapl(target_msgh->msg_controllen);
1185 if (msg_controllen < sizeof (struct target_cmsghdr))
1186 goto the_end;
1187 target_cmsg_addr = tswapl(target_msgh->msg_control);
1188 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1189 if (!target_cmsg)
1190 return -TARGET_EFAULT;
1191
1192 while (cmsg && target_cmsg) {
1193 void *data = CMSG_DATA(cmsg);
1194 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1195
1196 int len = tswapl(target_cmsg->cmsg_len)
1197 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1198
1199 space += CMSG_SPACE(len);
1200 if (space > msgh->msg_controllen) {
1201 space -= CMSG_SPACE(len);
1202 gemu_log("Host cmsg overflow\n");
1203 break;
1204 }
1205
1206 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1207 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1208 cmsg->cmsg_len = CMSG_LEN(len);
1209
1210 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1211 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1212 memcpy(data, target_data, len);
1213 } else {
1214 int *fd = (int *)data;
1215 int *target_fd = (int *)target_data;
1216 int i, numfds = len / sizeof(int);
1217
1218 for (i = 0; i < numfds; i++)
1219 fd[i] = tswap32(target_fd[i]);
1220 }
1221
1222 cmsg = CMSG_NXTHDR(msgh, cmsg);
1223 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1224 }
1225 unlock_user(target_cmsg, target_cmsg_addr, 0);
1226 the_end:
1227 msgh->msg_controllen = space;
1228 return 0;
1229 }
1230
1231 /* ??? Should this also swap msgh->name? */
1232 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1233 struct msghdr *msgh)
1234 {
1235 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1236 abi_long msg_controllen;
1237 abi_ulong target_cmsg_addr;
1238 struct target_cmsghdr *target_cmsg;
1239 socklen_t space = 0;
1240
1241 msg_controllen = tswapl(target_msgh->msg_controllen);
1242 if (msg_controllen < sizeof (struct target_cmsghdr))
1243 goto the_end;
1244 target_cmsg_addr = tswapl(target_msgh->msg_control);
1245 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1246 if (!target_cmsg)
1247 return -TARGET_EFAULT;
1248
1249 while (cmsg && target_cmsg) {
1250 void *data = CMSG_DATA(cmsg);
1251 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1252
1253 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1254
1255 space += TARGET_CMSG_SPACE(len);
1256 if (space > msg_controllen) {
1257 space -= TARGET_CMSG_SPACE(len);
1258 gemu_log("Target cmsg overflow\n");
1259 break;
1260 }
1261
1262 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1263 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1264 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1265
1266 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1267 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1268 memcpy(target_data, data, len);
1269 } else {
1270 int *fd = (int *)data;
1271 int *target_fd = (int *)target_data;
1272 int i, numfds = len / sizeof(int);
1273
1274 for (i = 0; i < numfds; i++)
1275 target_fd[i] = tswap32(fd[i]);
1276 }
1277
1278 cmsg = CMSG_NXTHDR(msgh, cmsg);
1279 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1280 }
1281 unlock_user(target_cmsg, target_cmsg_addr, space);
1282 the_end:
1283 target_msgh->msg_controllen = tswapl(space);
1284 return 0;
1285 }
1286
1287 /* do_setsockopt() Must return target values and target errnos. */
1288 static abi_long do_setsockopt(int sockfd, int level, int optname,
1289 abi_ulong optval_addr, socklen_t optlen)
1290 {
1291 abi_long ret;
1292 int val;
1293 struct ip_mreqn *ip_mreq;
1294 struct ip_mreq_source *ip_mreq_source;
1295
1296 switch(level) {
1297 case SOL_TCP:
1298 /* TCP options all take an 'int' value. */
1299 if (optlen < sizeof(uint32_t))
1300 return -TARGET_EINVAL;
1301
1302 if (get_user_u32(val, optval_addr))
1303 return -TARGET_EFAULT;
1304 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1305 break;
1306 case SOL_IP:
1307 switch(optname) {
1308 case IP_TOS:
1309 case IP_TTL:
1310 case IP_HDRINCL:
1311 case IP_ROUTER_ALERT:
1312 case IP_RECVOPTS:
1313 case IP_RETOPTS:
1314 case IP_PKTINFO:
1315 case IP_MTU_DISCOVER:
1316 case IP_RECVERR:
1317 case IP_RECVTOS:
1318 #ifdef IP_FREEBIND
1319 case IP_FREEBIND:
1320 #endif
1321 case IP_MULTICAST_TTL:
1322 case IP_MULTICAST_LOOP:
1323 val = 0;
1324 if (optlen >= sizeof(uint32_t)) {
1325 if (get_user_u32(val, optval_addr))
1326 return -TARGET_EFAULT;
1327 } else if (optlen >= 1) {
1328 if (get_user_u8(val, optval_addr))
1329 return -TARGET_EFAULT;
1330 }
1331 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1332 break;
1333 case IP_ADD_MEMBERSHIP:
1334 case IP_DROP_MEMBERSHIP:
1335 if (optlen < sizeof (struct target_ip_mreq) ||
1336 optlen > sizeof (struct target_ip_mreqn))
1337 return -TARGET_EINVAL;
1338
1339 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1340 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1341 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1342 break;
1343
1344 case IP_BLOCK_SOURCE:
1345 case IP_UNBLOCK_SOURCE:
1346 case IP_ADD_SOURCE_MEMBERSHIP:
1347 case IP_DROP_SOURCE_MEMBERSHIP:
1348 if (optlen != sizeof (struct target_ip_mreq_source))
1349 return -TARGET_EINVAL;
1350
1351 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1352 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1353 unlock_user (ip_mreq_source, optval_addr, 0);
1354 break;
1355
1356 default:
1357 goto unimplemented;
1358 }
1359 break;
1360 case TARGET_SOL_SOCKET:
1361 switch (optname) {
1362 /* Options with 'int' argument. */
1363 case TARGET_SO_DEBUG:
1364 optname = SO_DEBUG;
1365 break;
1366 case TARGET_SO_REUSEADDR:
1367 optname = SO_REUSEADDR;
1368 break;
1369 case TARGET_SO_TYPE:
1370 optname = SO_TYPE;
1371 break;
1372 case TARGET_SO_ERROR:
1373 optname = SO_ERROR;
1374 break;
1375 case TARGET_SO_DONTROUTE:
1376 optname = SO_DONTROUTE;
1377 break;
1378 case TARGET_SO_BROADCAST:
1379 optname = SO_BROADCAST;
1380 break;
1381 case TARGET_SO_SNDBUF:
1382 optname = SO_SNDBUF;
1383 break;
1384 case TARGET_SO_RCVBUF:
1385 optname = SO_RCVBUF;
1386 break;
1387 case TARGET_SO_KEEPALIVE:
1388 optname = SO_KEEPALIVE;
1389 break;
1390 case TARGET_SO_OOBINLINE:
1391 optname = SO_OOBINLINE;
1392 break;
1393 case TARGET_SO_NO_CHECK:
1394 optname = SO_NO_CHECK;
1395 break;
1396 case TARGET_SO_PRIORITY:
1397 optname = SO_PRIORITY;
1398 break;
1399 #ifdef SO_BSDCOMPAT
1400 case TARGET_SO_BSDCOMPAT:
1401 optname = SO_BSDCOMPAT;
1402 break;
1403 #endif
1404 case TARGET_SO_PASSCRED:
1405 optname = SO_PASSCRED;
1406 break;
1407 case TARGET_SO_TIMESTAMP:
1408 optname = SO_TIMESTAMP;
1409 break;
1410 case TARGET_SO_RCVLOWAT:
1411 optname = SO_RCVLOWAT;
1412 break;
1413 case TARGET_SO_RCVTIMEO:
1414 optname = SO_RCVTIMEO;
1415 break;
1416 case TARGET_SO_SNDTIMEO:
1417 optname = SO_SNDTIMEO;
1418 break;
1420 default:
1421 goto unimplemented;
1422 }
1423 if (optlen < sizeof(uint32_t))
1424 return -TARGET_EINVAL;
1425
1426 if (get_user_u32(val, optval_addr))
1427 return -TARGET_EFAULT;
1428 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1429 break;
1430 default:
1431 unimplemented:
1432 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1433 ret = -TARGET_ENOPROTOOPT;
1434 }
1435 return ret;
1436 }
1437
1438 /* do_getsockopt() Must return target values and target errnos. */
1439 static abi_long do_getsockopt(int sockfd, int level, int optname,
1440 abi_ulong optval_addr, abi_ulong optlen)
1441 {
1442 abi_long ret;
1443 int len, val;
1444 socklen_t lv;
1445
1446 switch(level) {
1447 case TARGET_SOL_SOCKET:
1448 level = SOL_SOCKET;
1449 switch (optname) {
1450 /* These don't just return a single integer */
1451 case TARGET_SO_LINGER:
1452 case TARGET_SO_RCVTIMEO:
1453 case TARGET_SO_SNDTIMEO:
1454 case TARGET_SO_PEERCRED:
1455 case TARGET_SO_PEERNAME:
1456 goto unimplemented;
1457 /* Options with 'int' argument. */
1458 case TARGET_SO_DEBUG:
1459 optname = SO_DEBUG;
1460 goto int_case;
1461 case TARGET_SO_REUSEADDR:
1462 optname = SO_REUSEADDR;
1463 goto int_case;
1464 case TARGET_SO_TYPE:
1465 optname = SO_TYPE;
1466 goto int_case;
1467 case TARGET_SO_ERROR:
1468 optname = SO_ERROR;
1469 goto int_case;
1470 case TARGET_SO_DONTROUTE:
1471 optname = SO_DONTROUTE;
1472 goto int_case;
1473 case TARGET_SO_BROADCAST:
1474 optname = SO_BROADCAST;
1475 goto int_case;
1476 case TARGET_SO_SNDBUF:
1477 optname = SO_SNDBUF;
1478 goto int_case;
1479 case TARGET_SO_RCVBUF:
1480 optname = SO_RCVBUF;
1481 goto int_case;
1482 case TARGET_SO_KEEPALIVE:
1483 optname = SO_KEEPALIVE;
1484 goto int_case;
1485 case TARGET_SO_OOBINLINE:
1486 optname = SO_OOBINLINE;
1487 goto int_case;
1488 case TARGET_SO_NO_CHECK:
1489 optname = SO_NO_CHECK;
1490 goto int_case;
1491 case TARGET_SO_PRIORITY:
1492 optname = SO_PRIORITY;
1493 goto int_case;
1494 #ifdef SO_BSDCOMPAT
1495 case TARGET_SO_BSDCOMPAT:
1496 optname = SO_BSDCOMPAT;
1497 goto int_case;
1498 #endif
1499 case TARGET_SO_PASSCRED:
1500 optname = SO_PASSCRED;
1501 goto int_case;
1502 case TARGET_SO_TIMESTAMP:
1503 optname = SO_TIMESTAMP;
1504 goto int_case;
1505 case TARGET_SO_RCVLOWAT:
1506 optname = SO_RCVLOWAT;
1507 goto int_case;
1508 default:
1509 goto int_case;
1510 }
1511 break;
1512 case SOL_TCP:
1513 /* TCP options all take an 'int' value. */
1514 int_case:
1515 if (get_user_u32(len, optlen))
1516 return -TARGET_EFAULT;
1517 if (len < 0)
1518 return -TARGET_EINVAL;
1519 lv = sizeof(lv);
1520 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1521 if (ret < 0)
1522 return ret;
1523 if (len > lv)
1524 len = lv;
1525 if (len == 4) {
1526 if (put_user_u32(val, optval_addr))
1527 return -TARGET_EFAULT;
1528 } else {
1529 if (put_user_u8(val, optval_addr))
1530 return -TARGET_EFAULT;
1531 }
1532 if (put_user_u32(len, optlen))
1533 return -TARGET_EFAULT;
1534 break;
1535 case SOL_IP:
1536 switch(optname) {
1537 case IP_TOS:
1538 case IP_TTL:
1539 case IP_HDRINCL:
1540 case IP_ROUTER_ALERT:
1541 case IP_RECVOPTS:
1542 case IP_RETOPTS:
1543 case IP_PKTINFO:
1544 case IP_MTU_DISCOVER:
1545 case IP_RECVERR:
1546 case IP_RECVTOS:
1547 #ifdef IP_FREEBIND
1548 case IP_FREEBIND:
1549 #endif
1550 case IP_MULTICAST_TTL:
1551 case IP_MULTICAST_LOOP:
1552 if (get_user_u32(len, optlen))
1553 return -TARGET_EFAULT;
1554 if (len < 0)
1555 return -TARGET_EINVAL;
1556 lv = sizeof(lv);
1557 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1558 if (ret < 0)
1559 return ret;
1560 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1561 len = 1;
1562 if (put_user_u32(len, optlen)
1563 || put_user_u8(val, optval_addr))
1564 return -TARGET_EFAULT;
1565 } else {
1566 if (len > sizeof(int))
1567 len = sizeof(int);
1568 if (put_user_u32(len, optlen)
1569 || put_user_u32(val, optval_addr))
1570 return -TARGET_EFAULT;
1571 }
1572 break;
1573 default:
1574 ret = -TARGET_ENOPROTOOPT;
1575 break;
1576 }
1577 break;
1578 default:
1579 unimplemented:
1580 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1581 level, optname);
1582 ret = -TARGET_EOPNOTSUPP;
1583 break;
1584 }
1585 return ret;
1586 }
1587
1588 /* FIXME
1589 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1590 * other lock functions have a return code of 0 for failure.
1591 */
1592 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1593 int count, int copy)
1594 {
1595 struct target_iovec *target_vec;
1596 abi_ulong base;
1597 int i;
1598
1599 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1600 if (!target_vec)
1601 return -TARGET_EFAULT;
1602 for(i = 0;i < count; i++) {
1603 base = tswapl(target_vec[i].iov_base);
1604 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1605 if (vec[i].iov_len != 0) {
1606 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1607 /* Don't check lock_user return value. We must call writev even
1608 if an element has an invalid base address. */
1609 } else {
1610 /* zero length pointer is ignored */
1611 vec[i].iov_base = NULL;
1612 }
1613 }
1614 unlock_user (target_vec, target_addr, 0);
1615 return 0;
1616 }
1617
1618 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1619 int count, int copy)
1620 {
1621 struct target_iovec *target_vec;
1622 abi_ulong base;
1623 int i;
1624
1625 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1626 if (!target_vec)
1627 return -TARGET_EFAULT;
1628 for(i = 0;i < count; i++) {
1629 if (target_vec[i].iov_base) {
1630 base = tswapl(target_vec[i].iov_base);
1631 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1632 }
1633 }
1634 unlock_user (target_vec, target_addr, 0);
1635
1636 return 0;
1637 }
1638
1639 /* do_socket() Must return target values and target errnos. */
1640 static abi_long do_socket(int domain, int type, int protocol)
1641 {
1642 #if defined(TARGET_MIPS)
1643 switch(type) {
1644 case TARGET_SOCK_DGRAM:
1645 type = SOCK_DGRAM;
1646 break;
1647 case TARGET_SOCK_STREAM:
1648 type = SOCK_STREAM;
1649 break;
1650 case TARGET_SOCK_RAW:
1651 type = SOCK_RAW;
1652 break;
1653 case TARGET_SOCK_RDM:
1654 type = SOCK_RDM;
1655 break;
1656 case TARGET_SOCK_SEQPACKET:
1657 type = SOCK_SEQPACKET;
1658 break;
1659 case TARGET_SOCK_PACKET:
1660 type = SOCK_PACKET;
1661 break;
1662 }
1663 #endif
1664 if (domain == PF_NETLINK)
1665 return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1666 return get_errno(socket(domain, type, protocol));
1667 }
1668
1669 /* do_bind() Must return target values and target errnos. */
1670 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1671 socklen_t addrlen)
1672 {
1673 void *addr;
1674 abi_long ret;
1675
1676 if ((int)addrlen < 0) {
1677 return -TARGET_EINVAL;
1678 }
1679
1680 addr = alloca(addrlen+1);
1681
1682 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1683 if (ret)
1684 return ret;
1685
1686 return get_errno(bind(sockfd, addr, addrlen));
1687 }
1688
1689 /* do_connect() Must return target values and target errnos. */
1690 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1691 socklen_t addrlen)
1692 {
1693 void *addr;
1694 abi_long ret;
1695
1696 if ((int)addrlen < 0) {
1697 return -TARGET_EINVAL;
1698 }
1699
1700 addr = alloca(addrlen);
1701
1702 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1703 if (ret)
1704 return ret;
1705
1706 return get_errno(connect(sockfd, addr, addrlen));
1707 }
1708
1709 /* do_sendrecvmsg() Must return target values and target errnos. */
1710 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1711 int flags, int send)
1712 {
1713 abi_long ret, len;
1714 struct target_msghdr *msgp;
1715 struct msghdr msg;
1716 int count;
1717 struct iovec *vec;
1718 abi_ulong target_vec;
1719
1720 /* FIXME */
1721 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1722 msgp,
1723 target_msg,
1724 send ? 1 : 0))
1725 return -TARGET_EFAULT;
1726 if (msgp->msg_name) {
1727 msg.msg_namelen = tswap32(msgp->msg_namelen);
1728 msg.msg_name = alloca(msg.msg_namelen);
1729 ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1730 msg.msg_namelen);
1731 if (ret) {
1732 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1733 return ret;
1734 }
1735 } else {
1736 msg.msg_name = NULL;
1737 msg.msg_namelen = 0;
1738 }
1739 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1740 msg.msg_control = alloca(msg.msg_controllen);
1741 msg.msg_flags = tswap32(msgp->msg_flags);
1742
1743 count = tswapl(msgp->msg_iovlen);
1744 vec = alloca(count * sizeof(struct iovec));
1745 target_vec = tswapl(msgp->msg_iov);
1746 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1747 msg.msg_iovlen = count;
1748 msg.msg_iov = vec;
1749
1750 if (send) {
1751 ret = target_to_host_cmsg(&msg, msgp);
1752 if (ret == 0)
1753 ret = get_errno(sendmsg(fd, &msg, flags));
1754 } else {
1755 ret = get_errno(recvmsg(fd, &msg, flags));
1756 if (!is_error(ret)) {
1757 len = ret;
1758 ret = host_to_target_cmsg(msgp, &msg);
1759 if (!is_error(ret))
1760 ret = len;
1761 }
1762 }
1763 unlock_iovec(vec, target_vec, count, !send);
1764 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1765 return ret;
1766 }
1767
1768 /* do_accept() Must return target values and target errnos. */
1769 static abi_long do_accept(int fd, abi_ulong target_addr,
1770 abi_ulong target_addrlen_addr)
1771 {
1772 socklen_t addrlen;
1773 void *addr;
1774 abi_long ret;
1775
1776 if (target_addr == 0)
1777 return get_errno(accept(fd, NULL, NULL));
1778
1779 /* Linux returns EINVAL if the addrlen pointer is invalid */
1780 if (get_user_u32(addrlen, target_addrlen_addr))
1781 return -TARGET_EINVAL;
1782
1783 if ((int)addrlen < 0) {
1784 return -TARGET_EINVAL;
1785 }
1786
1787 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1788 return -TARGET_EINVAL;
1789
1790 addr = alloca(addrlen);
1791
1792 ret = get_errno(accept(fd, addr, &addrlen));
1793 if (!is_error(ret)) {
1794 host_to_target_sockaddr(target_addr, addr, addrlen);
1795 if (put_user_u32(addrlen, target_addrlen_addr))
1796 ret = -TARGET_EFAULT;
1797 }
1798 return ret;
1799 }
1800
1801 /* do_getpeername() Must return target values and target errnos. */
1802 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1803 abi_ulong target_addrlen_addr)
1804 {
1805 socklen_t addrlen;
1806 void *addr;
1807 abi_long ret;
1808
1809 if (get_user_u32(addrlen, target_addrlen_addr))
1810 return -TARGET_EFAULT;
1811
1812 if ((int)addrlen < 0) {
1813 return -TARGET_EINVAL;
1814 }
1815
1816 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1817 return -TARGET_EFAULT;
1818
1819 addr = alloca(addrlen);
1820
1821 ret = get_errno(getpeername(fd, addr, &addrlen));
1822 if (!is_error(ret)) {
1823 host_to_target_sockaddr(target_addr, addr, addrlen);
1824 if (put_user_u32(addrlen, target_addrlen_addr))
1825 ret = -TARGET_EFAULT;
1826 }
1827 return ret;
1828 }
1829
1830 /* do_getsockname() Must return target values and target errnos. */
1831 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1832 abi_ulong target_addrlen_addr)
1833 {
1834 socklen_t addrlen;
1835 void *addr;
1836 abi_long ret;
1837
1838 if (get_user_u32(addrlen, target_addrlen_addr))
1839 return -TARGET_EFAULT;
1840
1841 if ((int)addrlen < 0) {
1842 return -TARGET_EINVAL;
1843 }
1844
1845 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1846 return -TARGET_EFAULT;
1847
1848 addr = alloca(addrlen);
1849
1850 ret = get_errno(getsockname(fd, addr, &addrlen));
1851 if (!is_error(ret)) {
1852 host_to_target_sockaddr(target_addr, addr, addrlen);
1853 if (put_user_u32(addrlen, target_addrlen_addr))
1854 ret = -TARGET_EFAULT;
1855 }
1856 return ret;
1857 }
1858
1859 /* do_socketpair() Must return target values and target errnos. */
1860 static abi_long do_socketpair(int domain, int type, int protocol,
1861 abi_ulong target_tab_addr)
1862 {
1863 int tab[2];
1864 abi_long ret;
1865
1866 ret = get_errno(socketpair(domain, type, protocol, tab));
1867 if (!is_error(ret)) {
1868 if (put_user_s32(tab[0], target_tab_addr)
1869 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1870 ret = -TARGET_EFAULT;
1871 }
1872 return ret;
1873 }
1874
1875 /* do_sendto() Must return target values and target errnos. */
1876 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1877 abi_ulong target_addr, socklen_t addrlen)
1878 {
1879 void *addr;
1880 void *host_msg;
1881 abi_long ret;
1882
1883 if ((int)addrlen < 0) {
1884 return -TARGET_EINVAL;
1885 }
1886
1887 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1888 if (!host_msg)
1889 return -TARGET_EFAULT;
1890 if (target_addr) {
1891 addr = alloca(addrlen);
1892 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1893 if (ret) {
1894 unlock_user(host_msg, msg, 0);
1895 return ret;
1896 }
1897 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1898 } else {
1899 ret = get_errno(send(fd, host_msg, len, flags));
1900 }
1901 unlock_user(host_msg, msg, 0);
1902 return ret;
1903 }
1904
1905 /* do_recvfrom() Must return target values and target errnos. */
1906 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1907 abi_ulong target_addr,
1908 abi_ulong target_addrlen)
1909 {
1910 socklen_t addrlen;
1911 void *addr;
1912 void *host_msg;
1913 abi_long ret;
1914
1915 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1916 if (!host_msg)
1917 return -TARGET_EFAULT;
1918 if (target_addr) {
1919 if (get_user_u32(addrlen, target_addrlen)) {
1920 ret = -TARGET_EFAULT;
1921 goto fail;
1922 }
1923 if ((int)addrlen < 0) {
1924 ret = -TARGET_EINVAL;
1925 goto fail;
1926 }
1927 addr = alloca(addrlen);
1928 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1929 } else {
1930 addr = NULL; /* To keep compiler quiet. */
1931 ret = get_errno(recv(fd, host_msg, len, flags));
1932 }
1933 if (!is_error(ret)) {
1934 if (target_addr) {
1935 host_to_target_sockaddr(target_addr, addr, addrlen);
1936 if (put_user_u32(addrlen, target_addrlen)) {
1937 ret = -TARGET_EFAULT;
1938 goto fail;
1939 }
1940 }
1941 unlock_user(host_msg, msg, len);
1942 } else {
1943 fail:
1944 unlock_user(host_msg, msg, 0);
1945 }
1946 return ret;
1947 }
1948
1949 #ifdef TARGET_NR_socketcall
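/* socketcall(2) multiplexes every socket operation through one syscall:
 * 'num' selects the operation and 'vptr' points at an array of abi_ulong
 * arguments in guest memory, one slot per parameter. For example, a
 * guest connect(sockfd, addr, addrlen) arrives as num == SOCKOP_connect
 * with vptr -> { sockfd, addr, addrlen }, which the case below unpacks
 * with get_user_ual() at offsets 0, n and 2*n before calling
 * do_connect().
 */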
1950 /* do_socketcall() Must return target values and target errnos. */
1951 static abi_long do_socketcall(int num, abi_ulong vptr)
1952 {
1953 abi_long ret;
1954 const int n = sizeof(abi_ulong);
1955
1956 switch(num) {
1957 case SOCKOP_socket:
1958 {
1959 abi_ulong domain, type, protocol;
1960
1961 if (get_user_ual(domain, vptr)
1962 || get_user_ual(type, vptr + n)
1963 || get_user_ual(protocol, vptr + 2 * n))
1964 return -TARGET_EFAULT;
1965
1966 ret = do_socket(domain, type, protocol);
1967 }
1968 break;
1969 case SOCKOP_bind:
1970 {
1971 abi_ulong sockfd;
1972 abi_ulong target_addr;
1973 socklen_t addrlen;
1974
1975 if (get_user_ual(sockfd, vptr)
1976 || get_user_ual(target_addr, vptr + n)
1977 || get_user_ual(addrlen, vptr + 2 * n))
1978 return -TARGET_EFAULT;
1979
1980 ret = do_bind(sockfd, target_addr, addrlen);
1981 }
1982 break;
1983 case SOCKOP_connect:
1984 {
1985 abi_ulong sockfd;
1986 abi_ulong target_addr;
1987 socklen_t addrlen;
1988
1989 if (get_user_ual(sockfd, vptr)
1990 || get_user_ual(target_addr, vptr + n)
1991 || get_user_ual(addrlen, vptr + 2 * n))
1992 return -TARGET_EFAULT;
1993
1994 ret = do_connect(sockfd, target_addr, addrlen);
1995 }
1996 break;
1997 case SOCKOP_listen:
1998 {
1999 abi_ulong sockfd, backlog;
2000
2001 if (get_user_ual(sockfd, vptr)
2002 || get_user_ual(backlog, vptr + n))
2003 return -TARGET_EFAULT;
2004
2005 ret = get_errno(listen(sockfd, backlog));
2006 }
2007 break;
2008 case SOCKOP_accept:
2009 {
2010 abi_ulong sockfd;
2011 abi_ulong target_addr, target_addrlen;
2012
2013 if (get_user_ual(sockfd, vptr)
2014 || get_user_ual(target_addr, vptr + n)
2015 || get_user_ual(target_addrlen, vptr + 2 * n))
2016 return -TARGET_EFAULT;
2017
2018 ret = do_accept(sockfd, target_addr, target_addrlen);
2019 }
2020 break;
2021 case SOCKOP_getsockname:
2022 {
2023 abi_ulong sockfd;
2024 abi_ulong target_addr, target_addrlen;
2025
2026 if (get_user_ual(sockfd, vptr)
2027 || get_user_ual(target_addr, vptr + n)
2028 || get_user_ual(target_addrlen, vptr + 2 * n))
2029 return -TARGET_EFAULT;
2030
2031 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2032 }
2033 break;
2034 case SOCKOP_getpeername:
2035 {
2036 abi_ulong sockfd;
2037 abi_ulong target_addr, target_addrlen;
2038
2039 if (get_user_ual(sockfd, vptr)
2040 || get_user_ual(target_addr, vptr + n)
2041 || get_user_ual(target_addrlen, vptr + 2 * n))
2042 return -TARGET_EFAULT;
2043
2044 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2045 }
2046 break;
2047 case SOCKOP_socketpair:
2048 {
2049 abi_ulong domain, type, protocol;
2050 abi_ulong tab;
2051
2052 if (get_user_ual(domain, vptr)
2053 || get_user_ual(type, vptr + n)
2054 || get_user_ual(protocol, vptr + 2 * n)
2055 || get_user_ual(tab, vptr + 3 * n))
2056 return -TARGET_EFAULT;
2057
2058 ret = do_socketpair(domain, type, protocol, tab);
2059 }
2060 break;
2061 case SOCKOP_send:
2062 {
2063 abi_ulong sockfd;
2064 abi_ulong msg;
2065 size_t len;
2066 abi_ulong flags;
2067
2068 if (get_user_ual(sockfd, vptr)
2069 || get_user_ual(msg, vptr + n)
2070 || get_user_ual(len, vptr + 2 * n)
2071 || get_user_ual(flags, vptr + 3 * n))
2072 return -TARGET_EFAULT;
2073
2074 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2075 }
2076 break;
2077 case SOCKOP_recv:
2078 {
2079 abi_ulong sockfd;
2080 abi_ulong msg;
2081 size_t len;
2082 abi_ulong flags;
2083
2084 if (get_user_ual(sockfd, vptr)
2085 || get_user_ual(msg, vptr + n)
2086 || get_user_ual(len, vptr + 2 * n)
2087 || get_user_ual(flags, vptr + 3 * n))
2088 return -TARGET_EFAULT;
2089
2090 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2091 }
2092 break;
2093 case SOCKOP_sendto:
2094 {
2095 abi_ulong sockfd;
2096 abi_ulong msg;
2097 size_t len;
2098 abi_ulong flags;
2099 abi_ulong addr;
2100 socklen_t addrlen;
2101
2102 if (get_user_ual(sockfd, vptr)
2103 || get_user_ual(msg, vptr + n)
2104 || get_user_ual(len, vptr + 2 * n)
2105 || get_user_ual(flags, vptr + 3 * n)
2106 || get_user_ual(addr, vptr + 4 * n)
2107 || get_user_ual(addrlen, vptr + 5 * n))
2108 return -TARGET_EFAULT;
2109
2110 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2111 }
2112 break;
2113 case SOCKOP_recvfrom:
2114 {
2115 abi_ulong sockfd;
2116 abi_ulong msg;
2117 size_t len;
2118 abi_ulong flags;
2119 abi_ulong addr;
2120 socklen_t addrlen;
2121
2122 if (get_user_ual(sockfd, vptr)
2123 || get_user_ual(msg, vptr + n)
2124 || get_user_ual(len, vptr + 2 * n)
2125 || get_user_ual(flags, vptr + 3 * n)
2126 || get_user_ual(addr, vptr + 4 * n)
2127 || get_user_ual(addrlen, vptr + 5 * n))
2128 return -TARGET_EFAULT;
2129
2130 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2131 }
2132 break;
2133 case SOCKOP_shutdown:
2134 {
2135 abi_ulong sockfd, how;
2136
2137 if (get_user_ual(sockfd, vptr)
2138 || get_user_ual(how, vptr + n))
2139 return -TARGET_EFAULT;
2140
2141 ret = get_errno(shutdown(sockfd, how));
2142 }
2143 break;
2144 case SOCKOP_sendmsg:
2145 case SOCKOP_recvmsg:
2146 {
2147 abi_ulong fd;
2148 abi_ulong target_msg;
2149 abi_ulong flags;
2150
2151 if (get_user_ual(fd, vptr)
2152 || get_user_ual(target_msg, vptr + n)
2153 || get_user_ual(flags, vptr + 2 * n))
2154 return -TARGET_EFAULT;
2155
2156 ret = do_sendrecvmsg(fd, target_msg, flags,
2157 (num == SOCKOP_sendmsg));
2158 }
2159 break;
2160 case SOCKOP_setsockopt:
2161 {
2162 abi_ulong sockfd;
2163 abi_ulong level;
2164 abi_ulong optname;
2165 abi_ulong optval;
2166 socklen_t optlen;
2167
2168 if (get_user_ual(sockfd, vptr)
2169 || get_user_ual(level, vptr + n)
2170 || get_user_ual(optname, vptr + 2 * n)
2171 || get_user_ual(optval, vptr + 3 * n)
2172 || get_user_ual(optlen, vptr + 4 * n))
2173 return -TARGET_EFAULT;
2174
2175 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2176 }
2177 break;
2178 case SOCKOP_getsockopt:
2179 {
2180 abi_ulong sockfd;
2181 abi_ulong level;
2182 abi_ulong optname;
2183 abi_ulong optval;
2184 socklen_t optlen;
2185
2186 if (get_user_ual(sockfd, vptr)
2187 || get_user_ual(level, vptr + n)
2188 || get_user_ual(optname, vptr + 2 * n)
2189 || get_user_ual(optval, vptr + 3 * n)
2190 || get_user_ual(optlen, vptr + 4 * n))
2191 return -TARGET_EFAULT;
2192
2193 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2194 }
2195 break;
2196 default:
2197 gemu_log("Unsupported socketcall: %d\n", num);
2198 ret = -TARGET_ENOSYS;
2199 break;
2200 }
2201 return ret;
2202 }
2203 #endif
2204
2205 #define N_SHM_REGIONS 32
2206
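/* Bookkeeping for SysV shared memory attachments: do_shmat() records the
 * guest address and size of each segment it maps so that do_shmdt() can
 * later clear the corresponding page flags. A start address of 0 marks a
 * free slot; at most N_SHM_REGIONS segments are tracked at once.
 */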
2207 static struct shm_region {
2208 abi_ulong start;
2209 abi_ulong size;
2210 } shm_regions[N_SHM_REGIONS];
2211
2212 struct target_ipc_perm
2213 {
2214 abi_long __key;
2215 abi_ulong uid;
2216 abi_ulong gid;
2217 abi_ulong cuid;
2218 abi_ulong cgid;
2219 unsigned short int mode;
2220 unsigned short int __pad1;
2221 unsigned short int __seq;
2222 unsigned short int __pad2;
2223 abi_ulong __unused1;
2224 abi_ulong __unused2;
2225 };
2226
2227 struct target_semid_ds
2228 {
2229 struct target_ipc_perm sem_perm;
2230 abi_ulong sem_otime;
2231 abi_ulong __unused1;
2232 abi_ulong sem_ctime;
2233 abi_ulong __unused2;
2234 abi_ulong sem_nsems;
2235 abi_ulong __unused3;
2236 abi_ulong __unused4;
2237 };
2238
2239 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2240 abi_ulong target_addr)
2241 {
2242 struct target_ipc_perm *target_ip;
2243 struct target_semid_ds *target_sd;
2244
2245 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2246 return -TARGET_EFAULT;
2247 target_ip = &(target_sd->sem_perm);
2248 host_ip->__key = tswapl(target_ip->__key);
2249 host_ip->uid = tswapl(target_ip->uid);
2250 host_ip->gid = tswapl(target_ip->gid);
2251 host_ip->cuid = tswapl(target_ip->cuid);
2252 host_ip->cgid = tswapl(target_ip->cgid);
2253 host_ip->mode = tswapl(target_ip->mode);
2254 unlock_user_struct(target_sd, target_addr, 0);
2255 return 0;
2256 }
2257
2258 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2259 struct ipc_perm *host_ip)
2260 {
2261 struct target_ipc_perm *target_ip;
2262 struct target_semid_ds *target_sd;
2263
2264 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2265 return -TARGET_EFAULT;
2266 target_ip = &(target_sd->sem_perm);
2267 target_ip->__key = tswapl(host_ip->__key);
2268 target_ip->uid = tswapl(host_ip->uid);
2269 target_ip->gid = tswapl(host_ip->gid);
2270 target_ip->cuid = tswapl(host_ip->cuid);
2271 target_ip->cgid = tswapl(host_ip->cgid);
2272 target_ip->mode = tswapl(host_ip->mode);
2273 unlock_user_struct(target_sd, target_addr, 1);
2274 return 0;
2275 }
2276
2277 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2278 abi_ulong target_addr)
2279 {
2280 struct target_semid_ds *target_sd;
2281
2282 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2283 return -TARGET_EFAULT;
2284 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2285 return -TARGET_EFAULT;
2286 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2287 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2288 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2289 unlock_user_struct(target_sd, target_addr, 0);
2290 return 0;
2291 }
2292
2293 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2294 struct semid_ds *host_sd)
2295 {
2296 struct target_semid_ds *target_sd;
2297
2298 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2299 return -TARGET_EFAULT;
2300 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2301 return -TARGET_EFAULT;
2302 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2303 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2304 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2305 unlock_user_struct(target_sd, target_addr, 1);
2306 return 0;
2307 }
2308
2309 struct target_seminfo {
2310 int semmap;
2311 int semmni;
2312 int semmns;
2313 int semmnu;
2314 int semmsl;
2315 int semopm;
2316 int semume;
2317 int semusz;
2318 int semvmx;
2319 int semaem;
2320 };
2321
2322 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2323 struct seminfo *host_seminfo)
2324 {
2325 struct target_seminfo *target_seminfo;
2326 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2327 return -TARGET_EFAULT;
2328 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2329 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2330 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2331 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2332 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2333 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2334 __put_user(host_seminfo->semume, &target_seminfo->semume);
2335 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2336 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2337 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2338 unlock_user_struct(target_seminfo, target_addr, 1);
2339 return 0;
2340 }
2341
2342 union semun {
2343 int val;
2344 struct semid_ds *buf;
2345 unsigned short *array;
2346 struct seminfo *__buf;
2347 };
2348
2349 union target_semun {
2350 int val;
2351 abi_ulong buf;
2352 abi_ulong array;
2353 abi_ulong __buf;
2354 };
2355
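/* GETALL/SETALL operate on an array of sem_nsems unsigned shorts whose
 * length is not passed by the caller, so both helpers below first query
 * the host with IPC_STAT to learn sem_nsems, then copy and byte-swap the
 * array element by element between guest and host memory.
 * target_to_host_semarray() allocates the host array; its counterpart
 * frees it after copying the results back.
 */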
2356 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2357 abi_ulong target_addr)
2358 {
2359 int nsems;
2360 unsigned short *array;
2361 union semun semun;
2362 struct semid_ds semid_ds;
2363 int i, ret;
2364
2365 semun.buf = &semid_ds;
2366
2367 ret = semctl(semid, 0, IPC_STAT, semun);
2368 if (ret == -1)
2369 return get_errno(ret);
2370
2371 nsems = semid_ds.sem_nsems;
2372
2373 *host_array = malloc(nsems*sizeof(unsigned short));
2374 array = lock_user(VERIFY_READ, target_addr,
2375 nsems*sizeof(unsigned short), 1);
2376 if (!array)
2377 return -TARGET_EFAULT;
2378
2379 for(i=0; i<nsems; i++) {
2380 __get_user((*host_array)[i], &array[i]);
2381 }
2382 unlock_user(array, target_addr, 0);
2383
2384 return 0;
2385 }
2386
2387 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2388 unsigned short **host_array)
2389 {
2390 int nsems;
2391 unsigned short *array;
2392 union semun semun;
2393 struct semid_ds semid_ds;
2394 int i, ret;
2395
2396 semun.buf = &semid_ds;
2397
2398 ret = semctl(semid, 0, IPC_STAT, semun);
2399 if (ret == -1)
2400 return get_errno(ret);
2401
2402 nsems = semid_ds.sem_nsems;
2403
2404 array = lock_user(VERIFY_WRITE, target_addr,
2405 nsems*sizeof(unsigned short), 0);
2406 if (!array)
2407 return -TARGET_EFAULT;
2408
2409 for(i=0; i<nsems; i++) {
2410 __put_user((*host_array)[i], &array[i]);
2411 }
2412 free(*host_array);
2413 unlock_user(array, target_addr, 1);
2414
2415 return 0;
2416 }
2417
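/* semctl() translation: the low byte of 'cmd' selects the operation
 * (cmd &= 0xff strips the IPC_64 "new layout" flag a guest libc may have
 * OR'ed in), and the semun argument is marshalled according to that
 * operation: GETVAL/SETVAL pass a plain value, GETALL/SETALL an array of
 * unsigned shorts, IPC_STAT/IPC_SET/SEM_STAT a semid_ds structure and
 * IPC_INFO/SEM_INFO a seminfo structure.
 */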
2418 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2419 union target_semun target_su)
2420 {
2421 union semun arg;
2422 struct semid_ds dsarg;
2423 unsigned short *array = NULL;
2424 struct seminfo seminfo;
2425 abi_long ret = -TARGET_EINVAL;
2426 abi_long err;
2427 cmd &= 0xff;
2428
2429 switch( cmd ) {
2430 case GETVAL:
2431 case SETVAL:
2432 arg.val = tswapl(target_su.val);
2433 ret = get_errno(semctl(semid, semnum, cmd, arg));
2434 target_su.val = tswapl(arg.val);
2435 break;
2436 case GETALL:
2437 case SETALL:
2438 err = target_to_host_semarray(semid, &array, target_su.array);
2439 if (err)
2440 return err;
2441 arg.array = array;
2442 ret = get_errno(semctl(semid, semnum, cmd, arg));
2443 err = host_to_target_semarray(semid, target_su.array, &array);
2444 if (err)
2445 return err;
2446 break;
2447 case IPC_STAT:
2448 case IPC_SET:
2449 case SEM_STAT:
2450 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2451 if (err)
2452 return err;
2453 arg.buf = &dsarg;
2454 ret = get_errno(semctl(semid, semnum, cmd, arg));
2455 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2456 if (err)
2457 return err;
2458 break;
2459 case IPC_INFO:
2460 case SEM_INFO:
2461 arg.__buf = &seminfo;
2462 ret = get_errno(semctl(semid, semnum, cmd, arg));
2463 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2464 if (err)
2465 return err;
2466 break;
2467 case IPC_RMID:
2468 case GETPID:
2469 case GETNCNT:
2470 case GETZCNT:
2471 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2472 break;
2473 }
2474
2475 return ret;
2476 }
2477
2478 struct target_sembuf {
2479 unsigned short sem_num;
2480 short sem_op;
2481 short sem_flg;
2482 };
2483
2484 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2485 abi_ulong target_addr,
2486 unsigned nsops)
2487 {
2488 struct target_sembuf *target_sembuf;
2489 int i;
2490
2491 target_sembuf = lock_user(VERIFY_READ, target_addr,
2492 nsops*sizeof(struct target_sembuf), 1);
2493 if (!target_sembuf)
2494 return -TARGET_EFAULT;
2495
2496 for(i=0; i<nsops; i++) {
2497 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2498 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2499 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2500 }
2501
2502 unlock_user(target_sembuf, target_addr, 0);
2503
2504 return 0;
2505 }
2506
2507 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2508 {
2509 struct sembuf sops[nsops];
2510
2511 if (target_to_host_sembuf(sops, ptr, nsops))
2512 return -TARGET_EFAULT;
2513
2514 return get_errno(semop(semid, sops, nsops));
2515 }
2516
2517 struct target_msqid_ds
2518 {
2519 struct target_ipc_perm msg_perm;
2520 abi_ulong msg_stime;
2521 #if TARGET_ABI_BITS == 32
2522 abi_ulong __unused1;
2523 #endif
2524 abi_ulong msg_rtime;
2525 #if TARGET_ABI_BITS == 32
2526 abi_ulong __unused2;
2527 #endif
2528 abi_ulong msg_ctime;
2529 #if TARGET_ABI_BITS == 32
2530 abi_ulong __unused3;
2531 #endif
2532 abi_ulong __msg_cbytes;
2533 abi_ulong msg_qnum;
2534 abi_ulong msg_qbytes;
2535 abi_ulong msg_lspid;
2536 abi_ulong msg_lrpid;
2537 abi_ulong __unused4;
2538 abi_ulong __unused5;
2539 };
2540
2541 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2542 abi_ulong target_addr)
2543 {
2544 struct target_msqid_ds *target_md;
2545
2546 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2547 return -TARGET_EFAULT;
2548 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2549 return -TARGET_EFAULT;
2550 host_md->msg_stime = tswapl(target_md->msg_stime);
2551 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2552 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2553 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2554 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2555 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2556 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2557 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2558 unlock_user_struct(target_md, target_addr, 0);
2559 return 0;
2560 }
2561
2562 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2563 struct msqid_ds *host_md)
2564 {
2565 struct target_msqid_ds *target_md;
2566
2567 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2568 return -TARGET_EFAULT;
2569 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2570 return -TARGET_EFAULT;
2571 target_md->msg_stime = tswapl(host_md->msg_stime);
2572 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2573 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2574 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2575 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2576 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2577 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2578 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2579 unlock_user_struct(target_md, target_addr, 1);
2580 return 0;
2581 }
2582
2583 struct target_msginfo {
2584 int msgpool;
2585 int msgmap;
2586 int msgmax;
2587 int msgmnb;
2588 int msgmni;
2589 int msgssz;
2590 int msgtql;
2591 unsigned short int msgseg;
2592 };
2593
2594 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2595 struct msginfo *host_msginfo)
2596 {
2597 struct target_msginfo *target_msginfo;
2598 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2599 return -TARGET_EFAULT;
2600 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2601 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2602 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2603 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2604 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2605 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2606 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2607 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2608 unlock_user_struct(target_msginfo, target_addr, 1);
2609 return 0;
2610 }
2611
2612 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2613 {
2614 struct msqid_ds dsarg;
2615 struct msginfo msginfo;
2616 abi_long ret = -TARGET_EINVAL;
2617
2618 cmd &= 0xff;
2619
2620 switch (cmd) {
2621 case IPC_STAT:
2622 case IPC_SET:
2623 case MSG_STAT:
2624 if (target_to_host_msqid_ds(&dsarg,ptr))
2625 return -TARGET_EFAULT;
2626 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2627 if (host_to_target_msqid_ds(ptr,&dsarg))
2628 return -TARGET_EFAULT;
2629 break;
2630 case IPC_RMID:
2631 ret = get_errno(msgctl(msgid, cmd, NULL));
2632 break;
2633 case IPC_INFO:
2634 case MSG_INFO:
2635 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2636 if (host_to_target_msginfo(ptr, &msginfo))
2637 return -TARGET_EFAULT;
2638 break;
2639 }
2640
2641 return ret;
2642 }
2643
2644 struct target_msgbuf {
2645 abi_long mtype;
2646 char mtext[1];
2647 };
2648
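/* msgsnd()/msgrcv() translation: the guest message starts with an
 * abi_long mtype followed by the message text, while the host expects a
 * native long. 'msgsz' counts only the mtext bytes, so the temporary
 * host buffer is allocated as msgsz + sizeof(long); the type field is
 * byte-swapped separately and the text is copied verbatim.
 */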
2649 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2650 unsigned int msgsz, int msgflg)
2651 {
2652 struct target_msgbuf *target_mb;
2653 struct msgbuf *host_mb;
2654 abi_long ret = 0;
2655
2656 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2657 return -TARGET_EFAULT;
2658 host_mb = malloc(msgsz+sizeof(long));
2659 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2660 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2661 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2662 free(host_mb);
2663 unlock_user_struct(target_mb, msgp, 0);
2664
2665 return ret;
2666 }
2667
2668 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2669 unsigned int msgsz, abi_long msgtyp,
2670 int msgflg)
2671 {
2672 struct target_msgbuf *target_mb;
2673 char *target_mtext;
2674 struct msgbuf *host_mb;
2675 abi_long ret = 0;
2676
2677 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2678 return -TARGET_EFAULT;
2679
2680 host_mb = malloc(msgsz+sizeof(long));
2681 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2682
2683 if (ret > 0) {
2684 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2685 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2686 if (!target_mtext) {
2687 ret = -TARGET_EFAULT;
2688 goto end;
2689 }
2690 memcpy(target_mb->mtext, host_mb->mtext, ret);
2691 unlock_user(target_mtext, target_mtext_addr, ret);
2692 }
2693
2694 target_mb->mtype = tswapl(host_mb->mtype);
2695
2696 end:
2697 if (target_mb)
2698 unlock_user_struct(target_mb, msgp, 1);
2699 free(host_mb);
2700 return ret;
2701 }
2702
2703 struct target_shmid_ds
2704 {
2705 struct target_ipc_perm shm_perm;
2706 abi_ulong shm_segsz;
2707 abi_ulong shm_atime;
2708 #if TARGET_ABI_BITS == 32
2709 abi_ulong __unused1;
2710 #endif
2711 abi_ulong shm_dtime;
2712 #if TARGET_ABI_BITS == 32
2713 abi_ulong __unused2;
2714 #endif
2715 abi_ulong shm_ctime;
2716 #if TARGET_ABI_BITS == 32
2717 abi_ulong __unused3;
2718 #endif
2719 int shm_cpid;
2720 int shm_lpid;
2721 abi_ulong shm_nattch;
2722 unsigned long int __unused4;
2723 unsigned long int __unused5;
2724 };
2725
2726 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2727 abi_ulong target_addr)
2728 {
2729 struct target_shmid_ds *target_sd;
2730
2731 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2732 return -TARGET_EFAULT;
2733 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2734 return -TARGET_EFAULT;
2735 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2736 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2737 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2738 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2739 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2740 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2741 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2742 unlock_user_struct(target_sd, target_addr, 0);
2743 return 0;
2744 }
2745
2746 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2747 struct shmid_ds *host_sd)
2748 {
2749 struct target_shmid_ds *target_sd;
2750
2751 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2752 return -TARGET_EFAULT;
2753 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2754 return -TARGET_EFAULT;
2755 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2756 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2757 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2758 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2759 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2760 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2761 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2762 unlock_user_struct(target_sd, target_addr, 1);
2763 return 0;
2764 }
2765
2766 struct target_shminfo {
2767 abi_ulong shmmax;
2768 abi_ulong shmmin;
2769 abi_ulong shmmni;
2770 abi_ulong shmseg;
2771 abi_ulong shmall;
2772 };
2773
2774 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2775 struct shminfo *host_shminfo)
2776 {
2777 struct target_shminfo *target_shminfo;
2778 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2779 return -TARGET_EFAULT;
2780 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2781 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2782 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2783 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2784 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2785 unlock_user_struct(target_shminfo, target_addr, 1);
2786 return 0;
2787 }
2788
2789 struct target_shm_info {
2790 int used_ids;
2791 abi_ulong shm_tot;
2792 abi_ulong shm_rss;
2793 abi_ulong shm_swp;
2794 abi_ulong swap_attempts;
2795 abi_ulong swap_successes;
2796 };
2797
2798 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2799 struct shm_info *host_shm_info)
2800 {
2801 struct target_shm_info *target_shm_info;
2802 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2803 return -TARGET_EFAULT;
2804 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2805 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2806 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2807 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2808 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2809 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2810 unlock_user_struct(target_shm_info, target_addr, 1);
2811 return 0;
2812 }
2813
2814 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2815 {
2816 struct shmid_ds dsarg;
2817 struct shminfo shminfo;
2818 struct shm_info shm_info;
2819 abi_long ret = -TARGET_EINVAL;
2820
2821 cmd &= 0xff;
2822
2823 switch(cmd) {
2824 case IPC_STAT:
2825 case IPC_SET:
2826 case SHM_STAT:
2827 if (target_to_host_shmid_ds(&dsarg, buf))
2828 return -TARGET_EFAULT;
2829 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2830 if (host_to_target_shmid_ds(buf, &dsarg))
2831 return -TARGET_EFAULT;
2832 break;
2833 case IPC_INFO:
2834 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2835 if (host_to_target_shminfo(buf, &shminfo))
2836 return -TARGET_EFAULT;
2837 break;
2838 case SHM_INFO:
2839 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2840 if (host_to_target_shm_info(buf, &shm_info))
2841 return -TARGET_EFAULT;
2842 break;
2843 case IPC_RMID:
2844 case SHM_LOCK:
2845 case SHM_UNLOCK:
2846 ret = get_errno(shmctl(shmid, cmd, NULL));
2847 break;
2848 }
2849
2850 return ret;
2851 }
2852
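/* shmat() translation: the segment size is obtained with IPC_STAT so the
 * guest page flags can be updated over the whole mapping. If the guest
 * did not request a fixed address, mmap_find_vma() picks a free guest
 * range and SHM_REMAP forces the host attachment to land exactly there.
 * The resulting guest address and size are recorded in shm_regions[] so
 * that do_shmdt() can undo the page-flag changes later.
 */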
2853 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2854 {
2855 abi_long raddr;
2856 void *host_raddr;
2857 struct shmid_ds shm_info;
2858 int i,ret;
2859
2860 /* find out the length of the shared memory segment */
2861 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2862 if (is_error(ret)) {
2863 /* can't get length, bail out */
2864 return ret;
2865 }
2866
2867 mmap_lock();
2868
2869 if (shmaddr)
2870 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2871 else {
2872 abi_ulong mmap_start;
2873
2874 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2875
2876 if (mmap_start == -1) {
2877 errno = ENOMEM;
2878 host_raddr = (void *)-1;
2879 } else
2880 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2881 }
2882
2883 if (host_raddr == (void *)-1) {
2884 mmap_unlock();
2885 return get_errno((long)host_raddr);
2886 }
2887 raddr=h2g((unsigned long)host_raddr);
2888
2889 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2890 PAGE_VALID | PAGE_READ |
2891 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2892
2893 for (i = 0; i < N_SHM_REGIONS; i++) {
2894 if (shm_regions[i].start == 0) {
2895 shm_regions[i].start = raddr;
2896 shm_regions[i].size = shm_info.shm_segsz;
2897 break;
2898 }
2899 }
2900
2901 mmap_unlock();
2902 return raddr;
2903
2904 }
2905
2906 static inline abi_long do_shmdt(abi_ulong shmaddr)
2907 {
2908 int i;
2909
2910 for (i = 0; i < N_SHM_REGIONS; ++i) {
2911 if (shm_regions[i].start == shmaddr) {
2912 shm_regions[i].start = 0;
2913 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
2914 break;
2915 }
2916 }
2917
2918 return get_errno(shmdt(g2h(shmaddr)));
2919 }
2920
2921 #ifdef TARGET_NR_ipc
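/* On targets that use a single ipc syscall, ipc(2) multiplexes all of the
 * SysV IPC operations: the low 16 bits of 'call' select the operation and
 * the upper 16 bits carry a version number that changes how some
 * arguments are passed (e.g. the old msgrcv calling convention packs msgp
 * and msgtyp into a small structure pointed to by 'ptr').
 */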
2922 /* ??? This only works with linear mappings. */
2923 /* do_ipc() must return target values and target errnos. */
2924 static abi_long do_ipc(unsigned int call, int first,
2925 int second, int third,
2926 abi_long ptr, abi_long fifth)
2927 {
2928 int version;
2929 abi_long ret = 0;
2930
2931 version = call >> 16;
2932 call &= 0xffff;
2933
2934 switch (call) {
2935 case IPCOP_semop:
2936 ret = do_semop(first, ptr, second);
2937 break;
2938
2939 case IPCOP_semget:
2940 ret = get_errno(semget(first, second, third));
2941 break;
2942
2943 case IPCOP_semctl:
2944 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2945 break;
2946
2947 case IPCOP_msgget:
2948 ret = get_errno(msgget(first, second));
2949 break;
2950
2951 case IPCOP_msgsnd:
2952 ret = do_msgsnd(first, ptr, second, third);
2953 break;
2954
2955 case IPCOP_msgctl:
2956 ret = do_msgctl(first, second, ptr);
2957 break;
2958
2959 case IPCOP_msgrcv:
2960 switch (version) {
2961 case 0:
2962 {
2963 struct target_ipc_kludge {
2964 abi_long msgp;
2965 abi_long msgtyp;
2966 } *tmp;
2967
2968 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2969 ret = -TARGET_EFAULT;
2970 break;
2971 }
2972
2973 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2974
2975 unlock_user_struct(tmp, ptr, 0);
2976 break;
2977 }
2978 default:
2979 ret = do_msgrcv(first, ptr, second, fifth, third);
2980 }
2981 break;
2982
2983 case IPCOP_shmat:
2984 switch (version) {
2985 default:
2986 {
2987 abi_ulong raddr;
2988 raddr = do_shmat(first, ptr, second);
2989 if (is_error(raddr))
2990 return get_errno(raddr);
2991 if (put_user_ual(raddr, third))
2992 return -TARGET_EFAULT;
2993 break;
2994 }
2995 case 1:
2996 ret = -TARGET_EINVAL;
2997 break;
2998 }
2999 break;
3000 case IPCOP_shmdt:
3001 ret = do_shmdt(ptr);
3002 break;
3003
3004 case IPCOP_shmget:
3005 /* IPC_* flag values are the same on all linux platforms */
3006 ret = get_errno(shmget(first, second, third));
3007 break;
3008
3009 /* IPC_* and SHM_* command values are the same on all linux platforms */
3010 case IPCOP_shmctl:
3011 ret = do_shmctl(first, second, ptr);
3012 break;
3013 default:
3014 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3015 ret = -TARGET_ENOSYS;
3016 break;
3017 }
3018 return ret;
3019 }
3020 #endif
3021
3022 /* kernel structure types definitions */
3023
3024 #define STRUCT(name, ...) STRUCT_ ## name,
3025 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3026 enum {
3027 #include "syscall_types.h"
3028 };
3029 #undef STRUCT
3030 #undef STRUCT_SPECIAL
3031
3032 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3033 #define STRUCT_SPECIAL(name)
3034 #include "syscall_types.h"
3035 #undef STRUCT
3036 #undef STRUCT_SPECIAL
3037
3038 typedef struct IOCTLEntry IOCTLEntry;
3039
3040 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3041 int fd, abi_long cmd, abi_long arg);
3042
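/* Each supported ioctl is described by one IOCTLEntry: the target and
 * host request codes (which may differ in their size/direction bits), a
 * name for logging, IOC_R/IOC_W/IOC_RW flags telling the generic code in
 * which direction the argument must be copied, an optional do_ioctl
 * callback for requests that need special handling, and a thunk type
 * description of the argument used to convert it between guest and host
 * layouts. The table itself is generated from ioctls.h below.
 */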
3043 struct IOCTLEntry {
3044 unsigned int target_cmd;
3045 unsigned int host_cmd;
3046 const char *name;
3047 int access;
3048 do_ioctl_fn *do_ioctl;
3049 const argtype arg_type[5];
3050 };
3051
3052 #define IOC_R 0x0001
3053 #define IOC_W 0x0002
3054 #define IOC_RW (IOC_R | IOC_W)
3055
3056 #define MAX_STRUCT_SIZE 4096
3057
3058 #ifdef CONFIG_FIEMAP
3059 /* So fiemap access checks don't overflow on 32 bit systems.
3060 * This is very slightly smaller than the limit imposed by
3061 * the underlying kernel.
3062 */
3063 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3064 / sizeof(struct fiemap_extent))
3065
3066 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3067 int fd, abi_long cmd, abi_long arg)
3068 {
3069 /* The parameter for this ioctl is a struct fiemap followed
3070 * by an array of struct fiemap_extent whose size is set
3071 * in fiemap->fm_extent_count. The array is filled in by the
3072 * ioctl.
3073 */
3074 int target_size_in, target_size_out;
3075 struct fiemap *fm;
3076 const argtype *arg_type = ie->arg_type;
3077 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3078 void *argptr, *p;
3079 abi_long ret;
3080 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3081 uint32_t outbufsz;
3082 int free_fm = 0;
3083
3084 assert(arg_type[0] == TYPE_PTR);
3085 assert(ie->access == IOC_RW);
3086 arg_type++;
3087 target_size_in = thunk_type_size(arg_type, 0);
3088 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3089 if (!argptr) {
3090 return -TARGET_EFAULT;
3091 }
3092 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3093 unlock_user(argptr, arg, 0);
3094 fm = (struct fiemap *)buf_temp;
3095 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3096 return -TARGET_EINVAL;
3097 }
3098
3099 outbufsz = sizeof (*fm) +
3100 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3101
3102 if (outbufsz > MAX_STRUCT_SIZE) {
3103 /* We can't fit all the extents into the fixed size buffer.
3104 * Allocate one that is large enough and use it instead.
3105 */
3106 fm = malloc(outbufsz);
3107 if (!fm) {
3108 return -TARGET_ENOMEM;
3109 }
3110 memcpy(fm, buf_temp, sizeof(struct fiemap));
3111 free_fm = 1;
3112 }
3113 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3114 if (!is_error(ret)) {
3115 target_size_out = target_size_in;
3116 /* An extent_count of 0 means we were only counting the extents
3117 * so there are no structs to copy
3118 */
3119 if (fm->fm_extent_count != 0) {
3120 target_size_out += fm->fm_mapped_extents * extent_size;
3121 }
3122 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3123 if (!argptr) {
3124 ret = -TARGET_EFAULT;
3125 } else {
3126 /* Convert the struct fiemap */
3127 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3128 if (fm->fm_extent_count != 0) {
3129 p = argptr + target_size_in;
3130 /* ...and then all the struct fiemap_extents */
3131 for (i = 0; i < fm->fm_mapped_extents; i++) {
3132 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3133 THUNK_TARGET);
3134 p += extent_size;
3135 }
3136 }
3137 unlock_user(argptr, arg, target_size_out);
3138 }
3139 }
3140 if (free_fm) {
3141 free(fm);
3142 }
3143 return ret;
3144 }
3145 #endif
3146
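/* SIOCGIFCONF needs special handling because struct ifreq is laid out
 * differently on the guest: ifc_len arrives in units of the target ifreq
 * size and must be rescaled to host struct ifreq before the host ioctl,
 * then scaled back (and the guest's ifc_buf pointer restored) when the
 * results are converted one ifreq at a time into the guest buffer.
 */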
3147 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3148 int fd, abi_long cmd, abi_long arg)
3149 {
3150 const argtype *arg_type = ie->arg_type;
3151 int target_size;
3152 void *argptr;
3153 int ret;
3154 struct ifconf *host_ifconf;
3155 uint32_t outbufsz;
3156 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3157 int target_ifreq_size;
3158 int nb_ifreq;
3159 int free_buf = 0;
3160 int i;
3161 int target_ifc_len;
3162 abi_long target_ifc_buf;
3163 int host_ifc_len;
3164 char *host_ifc_buf;
3165
3166 assert(arg_type[0] == TYPE_PTR);
3167 assert(ie->access == IOC_RW);
3168
3169 arg_type++;
3170 target_size = thunk_type_size(arg_type, 0);
3171
3172 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3173 if (!argptr)
3174 return -TARGET_EFAULT;
3175 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3176 unlock_user(argptr, arg, 0);
3177
3178 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3179 target_ifc_len = host_ifconf->ifc_len;
3180 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3181
3182 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3183 nb_ifreq = target_ifc_len / target_ifreq_size;
3184 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3185
3186 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3187 if (outbufsz > MAX_STRUCT_SIZE) {
3188 /* We can't fit all the requested ifreq entries into the fixed size buffer.
3189 * Allocate one that is large enough and use it instead.
3190 */
3191 host_ifconf = malloc(outbufsz);
3192 if (!host_ifconf) {
3193 return -TARGET_ENOMEM;
3194 }
3195 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3196 free_buf = 1;
3197 }
3198 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3199
3200 host_ifconf->ifc_len = host_ifc_len;
3201 host_ifconf->ifc_buf = host_ifc_buf;
3202
3203 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3204 if (!is_error(ret)) {
3205 /* convert host ifc_len to target ifc_len */
3206
3207 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3208 target_ifc_len = nb_ifreq * target_ifreq_size;
3209 host_ifconf->ifc_len = target_ifc_len;
3210
3211 /* restore target ifc_buf */
3212
3213 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3214
3215 /* copy struct ifconf to target user */
3216
3217 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3218 if (!argptr)
3219 return -TARGET_EFAULT;
3220 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3221 unlock_user(argptr, arg, target_size);
3222
3223 /* copy ifreq[] to target user */
3224
3225 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3226 for (i = 0; i < nb_ifreq ; i++) {
3227 thunk_convert(argptr + i * target_ifreq_size,
3228 host_ifc_buf + i * sizeof(struct ifreq),
3229 ifreq_arg_type, THUNK_TARGET);
3230 }
3231 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3232 }
3233
3234 if (free_buf) {
3235 free(host_ifconf);
3236 }
3237
3238 return ret;
3239 }
3240
3241 static IOCTLEntry ioctl_entries[] = {
3242 #define IOCTL(cmd, access, ...) \
3243 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3244 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3245 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3246 #include "ioctls.h"
3247 { 0, 0, },
3248 };
3249
3250 /* ??? Implement proper locking for ioctls. */
3251 /* do_ioctl() Must return target values and target errnos. */
3252 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3253 {
3254 const IOCTLEntry *ie;
3255 const argtype *arg_type;
3256 abi_long ret;
3257 uint8_t buf_temp[MAX_STRUCT_SIZE];
3258 int target_size;
3259 void *argptr;
3260
3261 ie = ioctl_entries;
3262 for(;;) {
3263 if (ie->target_cmd == 0) {
3264 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3265 return -TARGET_ENOSYS;
3266 }
3267 if (ie->target_cmd == cmd)
3268 break;
3269 ie++;
3270 }
3271 arg_type = ie->arg_type;
3272 #if defined(DEBUG)
3273 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3274 #endif
3275 if (ie->do_ioctl) {
3276 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3277 }
3278
3279 switch(arg_type[0]) {
3280 case TYPE_NULL:
3281 /* no argument */
3282 ret = get_errno(ioctl(fd, ie->host_cmd));
3283 break;
3284 case TYPE_PTRVOID:
3285 case TYPE_INT:
3286 /* int argument */
3287 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3288 break;
3289 case TYPE_PTR:
3290 arg_type++;
3291 target_size = thunk_type_size(arg_type, 0);
3292 switch(ie->access) {
3293 case IOC_R:
3294 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3295 if (!is_error(ret)) {
3296 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3297 if (!argptr)
3298 return -TARGET_EFAULT;
3299 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3300 unlock_user(argptr, arg, target_size);
3301 }
3302 break;
3303 case IOC_W:
3304 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3305 if (!argptr)
3306 return -TARGET_EFAULT;
3307 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3308 unlock_user(argptr, arg, 0);
3309 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3310 break;
3311 default:
3312 case IOC_RW:
3313 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3314 if (!argptr)
3315 return -TARGET_EFAULT;
3316 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3317 unlock_user(argptr, arg, 0);
3318 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3319 if (!is_error(ret)) {
3320 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3321 if (!argptr)
3322 return -TARGET_EFAULT;
3323 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3324 unlock_user(argptr, arg, target_size);
3325 }
3326 break;
3327 }
3328 break;
3329 default:
3330 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3331 (long)cmd, arg_type[0]);
3332 ret = -TARGET_ENOSYS;
3333 break;
3334 }
3335 return ret;
3336 }
3337
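/* The termios flag constants do not have the same numeric values on every
 * architecture, so each table below maps one flag (or multi-bit field) at
 * a time: an entry gives the target mask and value followed by the
 * matching host mask and value, and target_to_host_bitmask() /
 * host_to_target_bitmask() walk the table to rebuild the flag word bit by
 * bit.
 */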
3338 static const bitmask_transtbl iflag_tbl[] = {
3339 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3340 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3341 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3342 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3343 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3344 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3345 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3346 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3347 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3348 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3349 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3350 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3351 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3352 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3353 { 0, 0, 0, 0 }
3354 };
3355
3356 static const bitmask_transtbl oflag_tbl[] = {
3357 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3358 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3359 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3360 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3361 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3362 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3363 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3364 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3365 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3366 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3367 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3368 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3369 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3370 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3371 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3372 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3373 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3374 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3375 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3376 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3377 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3378 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3379 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3380 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3381 { 0, 0, 0, 0 }
3382 };
3383
3384 static const bitmask_transtbl cflag_tbl[] = {
3385 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3386 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3387 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3388 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3389 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3390 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3391 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3392 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3393 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3394 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3395 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3396 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3397 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3398 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3399 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3400 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3401 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3402 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3403 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3404 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3405 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3406 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3407 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3408 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3409 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3410 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3411 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3412 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3413 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3414 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3415 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3416 { 0, 0, 0, 0 }
3417 };
3418
3419 static const bitmask_transtbl lflag_tbl[] = {
3420 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3421 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3422 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3423 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3424 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3425 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3426 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3427 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3428 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3429 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3430 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3431 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3432 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3433 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3434 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3435 { 0, 0, 0, 0 }
3436 };
3437
3438 static void target_to_host_termios (void *dst, const void *src)
3439 {
3440 struct host_termios *host = dst;
3441 const struct target_termios *target = src;
3442
3443 host->c_iflag =
3444 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3445 host->c_oflag =
3446 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3447 host->c_cflag =
3448 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3449 host->c_lflag =
3450 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3451 host->c_line = target->c_line;
3452
3453 memset(host->c_cc, 0, sizeof(host->c_cc));
3454 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3455 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3456 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3457 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3458 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3459 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3460 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3461 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3462 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3463 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3464 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3465 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3466 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3467 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3468 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3469 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3470 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3471 }
3472
3473 static void host_to_target_termios (void *dst, const void *src)
3474 {
3475 struct target_termios *target = dst;
3476 const struct host_termios *host = src;
3477
3478 target->c_iflag =
3479 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3480 target->c_oflag =
3481 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3482 target->c_cflag =
3483 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3484 target->c_lflag =
3485 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3486 target->c_line = host->c_line;
3487
3488 memset(target->c_cc, 0, sizeof(target->c_cc));
3489 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3490 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3491 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3492 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3493 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3494 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3495 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3496 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3497 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3498 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3499 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3500 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3501 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3502 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3503 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3504 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3505 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3506 }
3507
3508 static const StructEntry struct_termios_def = {
3509 .convert = { host_to_target_termios, target_to_host_termios },
3510 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3511 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3512 };
3513
3514 static bitmask_transtbl mmap_flags_tbl[] = {
3515 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3516 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3517 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3518 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3519 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3520 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3521 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3522 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3523 { 0, 0, 0, 0 }
3524 };
3525
3526 #if defined(TARGET_I386)
3527
3528 /* NOTE: there is really only one LDT, shared by all the threads */
3529 static uint8_t *ldt_table;
3530
3531 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3532 {
3533 int size;
3534 void *p;
3535
3536 if (!ldt_table)
3537 return 0;
3538 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3539 if (size > bytecount)
3540 size = bytecount;
3541 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3542 if (!p)
3543 return -TARGET_EFAULT;
3544 /* ??? Should this be byteswapped? */
3545 memcpy(p, ldt_table, size);
3546 unlock_user(p, ptr, size);
3547 return size;
3548 }
3549
3550 /* XXX: add locking support */
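/* write_ldt() mirrors the kernel's modify_ldt(): the user-supplied
 * target_modify_ldt_ldt_s is unpacked into its flag bits and then encoded
 * into the two 32-bit words of an x86 segment descriptor (entry_1 holds
 * the low base and limit bits, entry_2 the high base bits plus the type,
 * present, size and granularity flags) before being written into the
 * guest's LDT, which is lazily allocated on first use.
 */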
3551 static abi_long write_ldt(CPUX86State *env,
3552 abi_ulong ptr, unsigned long bytecount, int oldmode)
3553 {
3554 struct target_modify_ldt_ldt_s ldt_info;
3555 struct target_modify_ldt_ldt_s *target_ldt_info;
3556 int seg_32bit, contents, read_exec_only, limit_in_pages;
3557 int seg_not_present, useable, lm;
3558 uint32_t *lp, entry_1, entry_2;
3559
3560 if (bytecount != sizeof(ldt_info))
3561 return -TARGET_EINVAL;
3562 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3563 return -TARGET_EFAULT;
3564 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3565 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3566 ldt_info.limit = tswap32(target_ldt_info->limit);
3567 ldt_info.flags = tswap32(target_ldt_info->flags);
3568 unlock_user_struct(target_ldt_info, ptr, 0);
3569
3570 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3571 return -TARGET_EINVAL;
3572 seg_32bit = ldt_info.flags & 1;
3573 contents = (ldt_info.flags >> 1) & 3;
3574 read_exec_only = (ldt_info.flags >> 3) & 1;
3575 limit_in_pages = (ldt_info.flags >> 4) & 1;
3576 seg_not_present = (ldt_info.flags >> 5) & 1;
3577 useable = (ldt_info.flags >> 6) & 1;
3578 #ifdef TARGET_ABI32
3579 lm = 0;
3580 #else
3581 lm = (ldt_info.flags >> 7) & 1;
3582 #endif
3583 if (contents == 3) {
3584 if (oldmode)
3585 return -TARGET_EINVAL;
3586 if (seg_not_present == 0)
3587 return -TARGET_EINVAL;
3588 }
3589 /* allocate the LDT */
3590 if (!ldt_table) {
3591 env->ldt.base = target_mmap(0,
3592 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3593 PROT_READ|PROT_WRITE,
3594 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3595 if (env->ldt.base == -1)
3596 return -TARGET_ENOMEM;
3597 memset(g2h(env->ldt.base), 0,
3598 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3599 env->ldt.limit = 0xffff;
3600 ldt_table = g2h(env->ldt.base);
3601 }
3602
3603 /* NOTE: same code as Linux kernel */
3604 /* Allow LDTs to be cleared by the user. */
3605 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3606 if (oldmode ||
3607 (contents == 0 &&
3608 read_exec_only == 1 &&
3609 seg_32bit == 0 &&
3610 limit_in_pages == 0 &&
3611 seg_not_present == 1 &&
3612 useable == 0 )) {
3613 entry_1 = 0;
3614 entry_2 = 0;
3615 goto install;
3616 }
3617 }
3618
3619 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3620 (ldt_info.limit & 0x0ffff);
3621 entry_2 = (ldt_info.base_addr & 0xff000000) |
3622 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3623 (ldt_info.limit & 0xf0000) |
3624 ((read_exec_only ^ 1) << 9) |
3625 (contents << 10) |
3626 ((seg_not_present ^ 1) << 15) |
3627 (seg_32bit << 22) |
3628 (limit_in_pages << 23) |
3629 (lm << 21) |
3630 0x7000;
3631 if (!oldmode)
3632 entry_2 |= (useable << 20);
3633
3634 /* Install the new entry ... */
3635 install:
3636 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3637 lp[0] = tswap32(entry_1);
3638 lp[1] = tswap32(entry_2);
3639 return 0;
3640 }
3641
3642 /* specific and weird i386 syscalls */
3643 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3644 unsigned long bytecount)
3645 {
3646 abi_long ret;
3647
3648 switch (func) {
3649 case 0:
3650 ret = read_ldt(ptr, bytecount);
3651 break;
3652 case 1:
3653 ret = write_ldt(env, ptr, bytecount, 1);
3654 break;
3655 case 0x11:
3656 ret = write_ldt(env, ptr, bytecount, 0);
3657 break;
3658 default:
3659 ret = -TARGET_ENOSYS;
3660 break;
3661 }
3662 return ret;
3663 }
3664
3665 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3666 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3667 {
3668 uint64_t *gdt_table = g2h(env->gdt.base);
3669 struct target_modify_ldt_ldt_s ldt_info;
3670 struct target_modify_ldt_ldt_s *target_ldt_info;
3671 int seg_32bit, contents, read_exec_only, limit_in_pages;
3672 int seg_not_present, useable, lm;
3673 uint32_t *lp, entry_1, entry_2;
3674 int i;
3675
3676 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3677 if (!target_ldt_info)
3678 return -TARGET_EFAULT;
3679 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3680 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3681 ldt_info.limit = tswap32(target_ldt_info->limit);
3682 ldt_info.flags = tswap32(target_ldt_info->flags);
3683 if (ldt_info.entry_number == -1) {
3684 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3685 if (gdt_table[i] == 0) {
3686 ldt_info.entry_number = i;
3687 target_ldt_info->entry_number = tswap32(i);
3688 break;
3689 }
3690 }
3691 }
3692 unlock_user_struct(target_ldt_info, ptr, 1);
3693
3694 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3695 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3696 return -TARGET_EINVAL;
3697 seg_32bit = ldt_info.flags & 1;
3698 contents = (ldt_info.flags >> 1) & 3;
3699 read_exec_only = (ldt_info.flags >> 3) & 1;
3700 limit_in_pages = (ldt_info.flags >> 4) & 1;
3701 seg_not_present = (ldt_info.flags >> 5) & 1;
3702 useable = (ldt_info.flags >> 6) & 1;
3703 #ifdef TARGET_ABI32
3704 lm = 0;
3705 #else
3706 lm = (ldt_info.flags >> 7) & 1;
3707 #endif
3708
3709 if (contents == 3) {
3710 if (seg_not_present == 0)
3711 return -TARGET_EINVAL;
3712 }
3713
3714 /* NOTE: same code as Linux kernel */
3715 /* Allow LDTs to be cleared by the user. */
3716 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3717 if ((contents == 0 &&
3718 read_exec_only == 1 &&
3719 seg_32bit == 0 &&
3720 limit_in_pages == 0 &&
3721 seg_not_present == 1 &&
3722 useable == 0 )) {
3723 entry_1 = 0;
3724 entry_2 = 0;
3725 goto install;
3726 }
3727 }
3728
3729 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3730 (ldt_info.limit & 0x0ffff);
3731 entry_2 = (ldt_info.base_addr & 0xff000000) |
3732 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3733 (ldt_info.limit & 0xf0000) |
3734 ((read_exec_only ^ 1) << 9) |
3735 (contents << 10) |
3736 ((seg_not_present ^ 1) << 15) |
3737 (seg_32bit << 22) |
3738 (limit_in_pages << 23) |
3739 (useable << 20) |
3740 (lm << 21) |
3741 0x7000;
3742
3743 /* Install the new entry ... */
3744 install:
3745 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3746 lp[0] = tswap32(entry_1);
3747 lp[1] = tswap32(entry_2);
3748 return 0;
3749 }
3750
3751 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3752 {
3753 struct target_modify_ldt_ldt_s *target_ldt_info;
3754 uint64_t *gdt_table = g2h(env->gdt.base);
3755 uint32_t base_addr, limit, flags;
3756 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3757 int seg_not_present, useable, lm;
3758 uint32_t *lp, entry_1, entry_2;
3759
3760 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3761 if (!target_ldt_info)
3762 return -TARGET_EFAULT;
3763 idx = tswap32(target_ldt_info->entry_number);
3764 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3765 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3766 unlock_user_struct(target_ldt_info, ptr, 1);
3767 return -TARGET_EINVAL;
3768 }
3769 lp = (uint32_t *)(gdt_table + idx);
3770 entry_1 = tswap32(lp[0]);
3771 entry_2 = tswap32(lp[1]);
3772
3773 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3774 contents = (entry_2 >> 10) & 3;
3775 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3776 seg_32bit = (entry_2 >> 22) & 1;
3777 limit_in_pages = (entry_2 >> 23) & 1;
3778 useable = (entry_2 >> 20) & 1;
3779 #ifdef TARGET_ABI32
3780 lm = 0;
3781 #else
3782 lm = (entry_2 >> 21) & 1;
3783 #endif
3784 flags = (seg_32bit << 0) | (contents << 1) |
3785 (read_exec_only << 3) | (limit_in_pages << 4) |
3786 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3787 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3788 base_addr = (entry_1 >> 16) |
3789 (entry_2 & 0xff000000) |
3790 ((entry_2 & 0xff) << 16);
3791 target_ldt_info->base_addr = tswapl(base_addr);
3792 target_ldt_info->limit = tswap32(limit);
3793 target_ldt_info->flags = tswap32(flags);
3794 unlock_user_struct(target_ldt_info, ptr, 1);
3795 return 0;
3796 }
3797 #endif /* TARGET_I386 && TARGET_ABI32 */
3798
3799 #ifndef TARGET_ABI32
3800 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3801 {
3802 abi_long ret = 0;
3803 abi_ulong val;
3804 int idx;
3805
3806 switch(code) {
3807 case TARGET_ARCH_SET_GS:
3808 case TARGET_ARCH_SET_FS:
3809 if (code == TARGET_ARCH_SET_GS)
3810 idx = R_GS;
3811 else
3812 idx = R_FS;
3813 cpu_x86_load_seg(env, idx, 0);
3814 env->segs[idx].base = addr;
3815 break;
3816 case TARGET_ARCH_GET_GS:
3817 case TARGET_ARCH_GET_FS:
3818 if (code == TARGET_ARCH_GET_GS)
3819 idx = R_GS;
3820 else
3821 idx = R_FS;
3822 val = env->segs[idx].base;
3823 if (put_user(val, addr, abi_ulong))
3824 return -TARGET_EFAULT;
3825 break;
3826 default:
3827 ret = -TARGET_EINVAL;
3828 break;
3829 }
3830 return ret;
3831 }
3832 #endif
3833
3834 #endif /* defined(TARGET_I386) */
3835
3836 #define NEW_STACK_SIZE 0x40000
3837
3838 #if defined(CONFIG_USE_NPTL)
3839
3840 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3841 typedef struct {
3842 CPUState *env;
3843 pthread_mutex_t mutex;
3844 pthread_cond_t cond;
3845 pthread_t thread;
3846 uint32_t tid;
3847 abi_ulong child_tidptr;
3848 abi_ulong parent_tidptr;
3849 sigset_t sigmask;
3850 } new_thread_info;
3851
3852 static void *clone_func(void *arg)
3853 {
3854 new_thread_info *info = arg;
3855 CPUState *env;
3856 TaskState *ts;
3857
3858 env = info->env;
3859 thread_env = env;
3860 ts = (TaskState *)thread_env->opaque;
3861 info->tid = gettid();
3862 env->host_tid = info->tid;
3863 task_settid(ts);
3864 if (info->child_tidptr)
3865 put_user_u32(info->tid, info->child_tidptr);
3866 if (info->parent_tidptr)
3867 put_user_u32(info->tid, info->parent_tidptr);
3868 /* Enable signals. */
3869 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3870 /* Signal to the parent that we're ready. */
3871 pthread_mutex_lock(&info->mutex);
3872 pthread_cond_broadcast(&info->cond);
3873 pthread_mutex_unlock(&info->mutex);
3874 /* Wait until the parent has finished initializing the TLS state. */
3875 pthread_mutex_lock(&clone_lock);
3876 pthread_mutex_unlock(&clone_lock);
3877 cpu_loop(env);
3878 /* never exits */
3879 return NULL;
3880 }
3881 #else
3882
3883 static int clone_func(void *arg)
3884 {
3885 CPUState *env = arg;
3886 cpu_loop(env);
3887 /* never exits */
3888 return 0;
3889 }
3890 #endif
3891
3892 /* do_fork() must return host values and target errnos (unlike most
3893 do_*() functions). */
3894 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3895 abi_ulong parent_tidptr, target_ulong newtls,
3896 abi_ulong child_tidptr)
3897 {
3898 int ret;
3899 TaskState *ts;
3900 CPUState *new_env;
3901 #if defined(CONFIG_USE_NPTL)
3902 unsigned int nptl_flags;
3903 sigset_t sigmask;
3904 #else
3905 uint8_t *new_stack;
3906 #endif
3907
3908 /* Emulate vfork() with fork() */
3909 if (flags & CLONE_VFORK)
3910 flags &= ~(CLONE_VFORK | CLONE_VM);
3911
3912 if (flags & CLONE_VM) {
3913 TaskState *parent_ts = (TaskState *)env->opaque;
3914 #if defined(CONFIG_USE_NPTL)
3915 new_thread_info info;
3916 pthread_attr_t attr;
3917 #endif
3918 ts = qemu_mallocz(sizeof(TaskState));
3919 init_task_state(ts);
3920 /* we create a new CPU instance. */
3921 new_env = cpu_copy(env);
3922 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3923 cpu_reset(new_env);
3924 #endif
3925 /* Init regs that differ from the parent. */
3926 cpu_clone_regs(new_env, newsp);
3927 new_env->opaque = ts;
3928 ts->bprm = parent_ts->bprm;
3929 ts->info = parent_ts->info;
3930 #if defined(CONFIG_USE_NPTL)
3931 nptl_flags = flags;
3932 flags &= ~CLONE_NPTL_FLAGS2;
3933
3934 if (nptl_flags & CLONE_CHILD_CLEARTID) {
3935 ts->child_tidptr = child_tidptr;
3936 }
3937
3938 if (nptl_flags & CLONE_SETTLS)
3939 cpu_set_tls (new_env, newtls);
3940
3941 /* Grab a mutex so that thread setup appears atomic. */
3942 pthread_mutex_lock(&clone_lock);
3943
3944 memset(&info, 0, sizeof(info));
3945 pthread_mutex_init(&info.mutex, NULL);
3946 pthread_mutex_lock(&info.mutex);
3947 pthread_cond_init(&info.cond, NULL);
3948 info.env = new_env;
3949 if (nptl_flags & CLONE_CHILD_SETTID)
3950 info.child_tidptr = child_tidptr;
3951 if (nptl_flags & CLONE_PARENT_SETTID)
3952 info.parent_tidptr = parent_tidptr;
3953
3954 ret = pthread_attr_init(&attr);
3955 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
3956 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
3957 /* It is not safe to deliver signals until the child has finished
3958 initializing, so temporarily block all signals. */
3959 sigfillset(&sigmask);
3960 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3961
3962 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3963 /* TODO: Free new CPU state if thread creation failed. */
3964
3965 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3966 pthread_attr_destroy(&attr);
3967 if (ret == 0) {
3968 /* Wait for the child to initialize. */
3969 pthread_cond_wait(&info.cond, &info.mutex);
3970 ret = info.tid;
3971 if (flags & CLONE_PARENT_SETTID)
3972 put_user_u32(ret, parent_tidptr);
3973 } else {
3974 ret = -1;
3975 }
3976 pthread_mutex_unlock(&info.mutex);
3977 pthread_cond_destroy(&info.cond);
3978 pthread_mutex_destroy(&info.mutex);
3979 pthread_mutex_unlock(&clone_lock);
3980 #else
3981 if (flags & CLONE_NPTL_FLAGS2)
3982 return -EINVAL;
3983 /* This is probably going to die very quickly, but do it anyway. */
3984 new_stack = qemu_mallocz (NEW_STACK_SIZE);
3985 #ifdef __ia64__
3986 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
3987 #else
3988 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3989 #endif
3990 #endif
3991 } else {
3992 /* if no CLONE_VM, we consider it is a fork */
3993 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3994 return -EINVAL;
3995 fork_start();
3996 ret = fork();
3997 if (ret == 0) {
3998 /* Child Process. */
3999 cpu_clone_regs(env, newsp);
4000 fork_end(1);
4001 #if defined(CONFIG_USE_NPTL)
4002 /* There is a race condition here. The parent process could
4003 theoretically read the TID in the child process before the child
4004 tid is set. This would require using either ptrace
4005 (not implemented) or having *_tidptr point at a shared memory
4006 mapping. We can't repeat the spinlock hack used above because
4007 the child process gets its own copy of the lock. */
4008 if (flags & CLONE_CHILD_SETTID)
4009 put_user_u32(gettid(), child_tidptr);
4010 if (flags & CLONE_PARENT_SETTID)
4011 put_user_u32(gettid(), parent_tidptr);
4012 ts = (TaskState *)env->opaque;
4013 if (flags & CLONE_SETTLS)
4014 cpu_set_tls (env, newtls);
4015 if (flags & CLONE_CHILD_CLEARTID)
4016 ts->child_tidptr = child_tidptr;
4017 #endif
4018 } else {
4019 fork_end(0);
4020 }
4021 }
4022 return ret;
4023 }
4024
4025 /* warning: doesn't handle Linux-specific flags... */
4026 static int target_to_host_fcntl_cmd(int cmd)
4027 {
4028 switch(cmd) {
4029 case TARGET_F_DUPFD:
4030 case TARGET_F_GETFD:
4031 case TARGET_F_SETFD:
4032 case TARGET_F_GETFL:
4033 case TARGET_F_SETFL:
4034 return cmd;
4035 case TARGET_F_GETLK:
4036 return F_GETLK;
4037 case TARGET_F_SETLK:
4038 return F_SETLK;
4039 case TARGET_F_SETLKW:
4040 return F_SETLKW;
4041 case TARGET_F_GETOWN:
4042 return F_GETOWN;
4043 case TARGET_F_SETOWN:
4044 return F_SETOWN;
4045 case TARGET_F_GETSIG:
4046 return F_GETSIG;
4047 case TARGET_F_SETSIG:
4048 return F_SETSIG;
4049 #if TARGET_ABI_BITS == 32
4050 case TARGET_F_GETLK64:
4051 return F_GETLK64;
4052 case TARGET_F_SETLK64:
4053 return F_SETLK64;
4054 case TARGET_F_SETLKW64:
4055 return F_SETLKW64;
4056 #endif
4057 case TARGET_F_SETLEASE:
4058 return F_SETLEASE;
4059 case TARGET_F_GETLEASE:
4060 return F_GETLEASE;
4061 #ifdef F_DUPFD_CLOEXEC
4062 case TARGET_F_DUPFD_CLOEXEC:
4063 return F_DUPFD_CLOEXEC;
4064 #endif
4065 case TARGET_F_NOTIFY:
4066 return F_NOTIFY;
4067 default:
4068 return -TARGET_EINVAL;
4069 }
4070 return -TARGET_EINVAL;
4071 }
4072
4073 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4074 {
4075 struct flock fl;
4076 struct target_flock *target_fl;
4077 struct flock64 fl64;
4078 struct target_flock64 *target_fl64;
4079 abi_long ret;
4080 int host_cmd = target_to_host_fcntl_cmd(cmd);
4081
4082 if (host_cmd == -TARGET_EINVAL)
4083 return host_cmd;
4084
4085 switch(cmd) {
4086 case TARGET_F_GETLK:
4087 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4088 return -TARGET_EFAULT;
4089 fl.l_type = tswap16(target_fl->l_type);
4090 fl.l_whence = tswap16(target_fl->l_whence);
4091 fl.l_start = tswapl(target_fl->l_start);
4092 fl.l_len = tswapl(target_fl->l_len);
4093 fl.l_pid = tswap32(target_fl->l_pid);
4094 unlock_user_struct(target_fl, arg, 0);
4095 ret = get_errno(fcntl(fd, host_cmd, &fl));
4096 if (ret == 0) {
4097 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4098 return -TARGET_EFAULT;
4099 target_fl->l_type = tswap16(fl.l_type);
4100 target_fl->l_whence = tswap16(fl.l_whence);
4101 target_fl->l_start = tswapl(fl.l_start);
4102 target_fl->l_len = tswapl(fl.l_len);
4103 target_fl->l_pid = tswap32(fl.l_pid);
4104 unlock_user_struct(target_fl, arg, 1);
4105 }
4106 break;
4107
4108 case TARGET_F_SETLK:
4109 case TARGET_F_SETLKW:
4110 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4111 return -TARGET_EFAULT;
4112 fl.l_type = tswap16(target_fl->l_type);
4113 fl.l_whence = tswap16(target_fl->l_whence);
4114 fl.l_start = tswapl(target_fl->l_start);
4115 fl.l_len = tswapl(target_fl->l_len);
4116 fl.l_pid = tswap32(target_fl->l_pid);
4117 unlock_user_struct(target_fl, arg, 0);
4118 ret = get_errno(fcntl(fd, host_cmd, &fl));
4119 break;
4120
4121 case TARGET_F_GETLK64:
4122 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4123 return -TARGET_EFAULT;
4124 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4125 fl64.l_whence = tswap16(target_fl64->l_whence);
4126 fl64.l_start = tswapl(target_fl64->l_start);
4127 fl64.l_len = tswapl(target_fl64->l_len);
4128 fl64.l_pid = tswap32(target_fl64->l_pid);
4129 unlock_user_struct(target_fl64, arg, 0);
4130 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4131 if (ret == 0) {
4132 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4133 return -TARGET_EFAULT;
4134 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4135 target_fl64->l_whence = tswap16(fl64.l_whence);
4136 target_fl64->l_start = tswapl(fl64.l_start);
4137 target_fl64->l_len = tswapl(fl64.l_len);
4138 target_fl64->l_pid = tswap32(fl64.l_pid);
4139 unlock_user_struct(target_fl64, arg, 1);
4140 }
4141 break;
4142 case TARGET_F_SETLK64:
4143 case TARGET_F_SETLKW64:
4144 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4145 return -TARGET_EFAULT;
4146 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4147 fl64.l_whence = tswap16(target_fl64->l_whence);
4148 fl64.l_start = tswapl(target_fl64->l_start);
4149 fl64.l_len = tswapl(target_fl64->l_len);
4150 fl64.l_pid = tswap32(target_fl64->l_pid);
4151 unlock_user_struct(target_fl64, arg, 0);
4152 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4153 break;
4154
4155 case TARGET_F_GETFL:
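/* O_* flag encodings differ between target and host ABIs, so map the
   returned bits back through fcntl_flags_tbl. */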
4156 ret = get_errno(fcntl(fd, host_cmd, arg));
4157 if (ret >= 0) {
4158 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4159 }
4160 break;
4161
4162 case TARGET_F_SETFL:
4163 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4164 break;
4165
4166 case TARGET_F_SETOWN:
4167 case TARGET_F_GETOWN:
4168 case TARGET_F_SETSIG:
4169 case TARGET_F_GETSIG:
4170 case TARGET_F_SETLEASE:
4171 case TARGET_F_GETLEASE:
4172 ret = get_errno(fcntl(fd, host_cmd, arg));
4173 break;
4174
4175 default:
4176 ret = get_errno(fcntl(fd, cmd, arg));
4177 break;
4178 }
4179 return ret;
4180 }
4181
4182 #ifdef USE_UID16
4183
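/* Helpers for the legacy 16-bit uid/gid syscalls: host IDs wider than 16 bits
   are clamped to the overflow value 65534, and the special value -1 is
   preserved when widening. */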
4184 static inline int high2lowuid(int uid)
4185 {
4186 if (uid > 65535)
4187 return 65534;
4188 else
4189 return uid;
4190 }
4191
4192 static inline int high2lowgid(int gid)
4193 {
4194 if (gid > 65535)
4195 return 65534;
4196 else
4197 return gid;
4198 }
4199
4200 static inline int low2highuid(int uid)
4201 {
4202 if ((int16_t)uid == -1)
4203 return -1;
4204 else
4205 return uid;
4206 }
4207
4208 static inline int low2highgid(int gid)
4209 {
4210 if ((int16_t)gid == -1)
4211 return -1;
4212 else
4213 return gid;
4214 }
4215 static inline int tswapid(int id)
4216 {
4217 return tswap16(id);
4218 }
4219 #else /* !USE_UID16 */
4220 static inline int high2lowuid(int uid)
4221 {
4222 return uid;
4223 }
4224 static inline int high2lowgid(int gid)
4225 {
4226 return gid;
4227 }
4228 static inline int low2highuid(int uid)
4229 {
4230 return uid;
4231 }
4232 static inline int low2highgid(int gid)
4233 {
4234 return gid;
4235 }
4236 static inline int tswapid(int id)
4237 {
4238 return tswap32(id);
4239 }
4240 #endif /* USE_UID16 */
4241
4242 void syscall_init(void)
4243 {
4244 IOCTLEntry *ie;
4245 const argtype *arg_type;
4246 int size;
4247 int i;
4248
4249 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4250 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4251 #include "syscall_types.h"
4252 #undef STRUCT
4253 #undef STRUCT_SPECIAL
4254
4255 /* we patch the ioctl size if necessary. We rely on the fact that
4256 no ioctl has all the bits at '1' in the size field */
4257 ie = ioctl_entries;
4258 while (ie->target_cmd != 0) {
4259 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4260 TARGET_IOC_SIZEMASK) {
4261 arg_type = ie->arg_type;
4262 if (arg_type[0] != TYPE_PTR) {
4263 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4264 ie->target_cmd);
4265 exit(1);
4266 }
4267 arg_type++;
4268 size = thunk_type_size(arg_type, 0);
4269 ie->target_cmd = (ie->target_cmd &
4270 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4271 (size << TARGET_IOC_SIZESHIFT);
4272 }
4273
4274 /* Build target_to_host_errno_table[] table from
4275 * host_to_target_errno_table[]. */
4276 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4277 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4278
4279 /* automatic consistency check if same arch */
4280 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4281 (defined(__x86_64__) && defined(TARGET_X86_64))
4282 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4283 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4284 ie->name, ie->target_cmd, ie->host_cmd);
4285 }
4286 #endif
4287 ie++;
4288 }
4289 }
4290
4291 #if TARGET_ABI_BITS == 32
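/* On 32-bit ABIs a 64-bit file offset arrives split across two registers;
   recombine the halves in target word order. */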
4292 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4293 {
4294 #ifdef TARGET_WORDS_BIGENDIAN
4295 return ((uint64_t)word0 << 32) | word1;
4296 #else
4297 return ((uint64_t)word1 << 32) | word0;
4298 #endif
4299 }
4300 #else /* TARGET_ABI_BITS == 32 */
4301 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4302 {
4303 return word0;
4304 }
4305 #endif /* TARGET_ABI_BITS != 32 */
4306
4307 #ifdef TARGET_NR_truncate64
4308 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4309 abi_long arg2,
4310 abi_long arg3,
4311 abi_long arg4)
4312 {
4313 #ifdef TARGET_ARM
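/* The ARM EABI aligns 64-bit arguments to an even/odd register pair,
   inserting a padding word, so the offset actually starts in arg3. */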
4314 if (((CPUARMState *)cpu_env)->eabi)
4315 {
4316 arg2 = arg3;
4317 arg3 = arg4;
4318 }
4319 #endif
4320 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4321 }
4322 #endif
4323
4324 #ifdef TARGET_NR_ftruncate64
4325 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4326 abi_long arg2,
4327 abi_long arg3,
4328 abi_long arg4)
4329 {
4330 #ifdef TARGET_ARM
4331 if (((CPUARMState *)cpu_env)->eabi)
4332 {
4333 arg2 = arg3;
4334 arg3 = arg4;
4335 }
4336 #endif
4337 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4338 }
4339 #endif
4340
4341 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4342 abi_ulong target_addr)
4343 {
4344 struct target_timespec *target_ts;
4345
4346 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4347 return -TARGET_EFAULT;
4348 host_ts->tv_sec = tswapl(target_ts->tv_sec);
4349 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
4350 unlock_user_struct(target_ts, target_addr, 0);
4351 return 0;
4352 }
4353
4354 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4355 struct timespec *host_ts)
4356 {
4357 struct target_timespec *target_ts;
4358
4359 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4360 return -TARGET_EFAULT;
4361 target_ts->tv_sec = tswapl(host_ts->tv_sec);
4362 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
4363 unlock_user_struct(target_ts, target_addr, 1);
4364 return 0;
4365 }
4366
4367 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4368 static inline abi_long host_to_target_stat64(void *cpu_env,
4369 abi_ulong target_addr,
4370 struct stat *host_st)
4371 {
4372 #ifdef TARGET_ARM
4373 if (((CPUARMState *)cpu_env)->eabi) {
4374 struct target_eabi_stat64 *target_st;
4375
4376 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4377 return -TARGET_EFAULT;
4378 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4379 __put_user(host_st->st_dev, &target_st->st_dev);
4380 __put_user(host_st->st_ino, &target_st->st_ino);
4381 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4382 __put_user(host_st->st_ino, &target_st->__st_ino);
4383 #endif
4384 __put_user(host_st->st_mode, &target_st->st_mode);
4385 __put_user(host_st->st_nlink, &target_st->st_nlink);
4386 __put_user(host_st->st_uid, &target_st->st_uid);
4387 __put_user(host_st->st_gid, &target_st->st_gid);
4388 __put_user(host_st->st_rdev, &target_st->st_rdev);
4389 __put_user(host_st->st_size, &target_st->st_size);
4390 __put_user(host_st->st_blksize, &target_st->st_blksize);
4391 __put_user(host_st->st_blocks, &target_st->st_blocks);
4392 __put_user(host_st->st_atime, &target_st->target_st_atime);
4393 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4394 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4395 unlock_user_struct(target_st, target_addr, 1);
4396 } else
4397 #endif
4398 {
4399 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4400 struct target_stat *target_st;
4401 #else
4402 struct target_stat64 *target_st;
4403 #endif
4404
4405 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4406 return -TARGET_EFAULT;
4407 memset(target_st, 0, sizeof(*target_st));
4408 __put_user(host_st->st_dev, &target_st->st_dev);
4409 __put_user(host_st->st_ino, &target_st->st_ino);
4410 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4411 __put_user(host_st->st_ino, &target_st->__st_ino);
4412 #endif
4413 __put_user(host_st->st_mode, &target_st->st_mode);
4414 __put_user(host_st->st_nlink, &target_st->st_nlink);
4415 __put_user(host_st->st_uid, &target_st->st_uid);
4416 __put_user(host_st->st_gid, &target_st->st_gid);
4417 __put_user(host_st->st_rdev, &target_st->st_rdev);
4418 /* XXX: better use of kernel struct */
4419 __put_user(host_st->st_size, &target_st->st_size);
4420 __put_user(host_st->st_blksize, &target_st->st_blksize);
4421 __put_user(host_st->st_blocks, &target_st->st_blocks);
4422 __put_user(host_st->st_atime, &target_st->target_st_atime);
4423 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4424 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4425 unlock_user_struct(target_st, target_addr, 1);
4426 }
4427
4428 return 0;
4429 }
4430 #endif
4431
4432 #if defined(CONFIG_USE_NPTL)
4433 /* ??? Using host futex calls even when target atomic operations
4434 are not really atomic probably breaks things. However, implementing
4435 futexes locally would make futexes shared between multiple processes
4436 tricky; in any case they're probably useless, because guest atomic
4437 operations won't work either. */
4438 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4439 target_ulong uaddr2, int val3)
4440 {
4441 struct timespec ts, *pts;
4442 int base_op;
4443
4444 /* ??? We assume FUTEX_* constants are the same on both host
4445 and target. */
4446 #ifdef FUTEX_CMD_MASK
4447 base_op = op & FUTEX_CMD_MASK;
4448 #else
4449 base_op = op;
4450 #endif
4451 switch (base_op) {
4452 case FUTEX_WAIT:
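/* The futex word lives in guest memory in target byte order, so byte-swap
   val before the host kernel compares against it. */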
4453 if (timeout) {
4454 pts = &ts;
4455 target_to_host_timespec(pts, timeout);
4456 } else {
4457 pts = NULL;
4458 }
4459 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4460 pts, NULL, 0));
4461 case FUTEX_WAKE:
4462 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4463 case FUTEX_FD:
4464 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4465 case FUTEX_REQUEUE:
4466 case FUTEX_CMP_REQUEUE:
4467 case FUTEX_WAKE_OP:
4468 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4469 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4470 But the prototype takes a `struct timespec *'; insert casts
4471 to satisfy the compiler. We do not need to tswap TIMEOUT
4472 since it's not compared to guest memory. */
4473 pts = (struct timespec *)(uintptr_t) timeout;
4474 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4475 g2h(uaddr2),
4476 (base_op == FUTEX_CMP_REQUEUE
4477 ? tswap32(val3)
4478 : val3)));
4479 default:
4480 return -TARGET_ENOSYS;
4481 }
4482 }
4483 #endif
4484
4485 /* Map host to target signal numbers for the wait family of syscalls.
4486 Assume all other status bits are the same. */
4487 static int host_to_target_waitstatus(int status)
4488 {
4489 if (WIFSIGNALED(status)) {
4490 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4491 }
4492 if (WIFSTOPPED(status)) {
4493 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4494 | (status & 0xff);
4495 }
4496 return status;
4497 }
4498
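/* Return the kernel release (the host's, or a user-supplied override in
   qemu_uname_release) packed as (major << 16) | (minor << 8) | micro,
   cached after the first call. */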
4499 int get_osversion(void)
4500 {
4501 static int osversion;
4502 struct new_utsname buf;
4503 const char *s;
4504 int i, n, tmp;
4505 if (osversion)
4506 return osversion;
4507 if (qemu_uname_release && *qemu_uname_release) {
4508 s = qemu_uname_release;
4509 } else {
4510 if (sys_uname(&buf))
4511 return 0;
4512 s = buf.release;
4513 }
4514 tmp = 0;
4515 for (i = 0; i < 3; i++) {
4516 n = 0;
4517 while (*s >= '0' && *s <= '9') {
4518 n *= 10;
4519 n += *s - '0';
4520 s++;
4521 }
4522 tmp = (tmp << 8) + n;
4523 if (*s == '.')
4524 s++;
4525 }
4526 osversion = tmp;
4527 return osversion;
4528 }
4529
4530 /* do_syscall() should always have a single exit point at the end so
4531 that actions, such as logging of syscall results, can be performed.
4532 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4533 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4534 abi_long arg2, abi_long arg3, abi_long arg4,
4535 abi_long arg5, abi_long arg6)
4536 {
4537 abi_long ret;
4538 struct stat st;
4539 struct statfs stfs;
4540 void *p;
4541
4542 #ifdef DEBUG
4543 gemu_log("syscall %d", num);
4544 #endif
4545 if(do_strace)
4546 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4547
4548 switch(num) {
4549 case TARGET_NR_exit:
4550 #ifdef CONFIG_USE_NPTL
4551 /* In old applications this may be used to implement _exit(2).
4552 However, in threaded applications it is used for thread termination,
4553 and _exit_group is used for application termination.
4554 Do thread termination if we have more than one thread. */
4555 /* FIXME: This probably breaks if a signal arrives. We should probably
4556 be disabling signals. */
4557 if (first_cpu->next_cpu) {
4558 TaskState *ts;
4559 CPUState **lastp;
4560 CPUState *p;
4561
4562 cpu_list_lock();
4563 lastp = &first_cpu;
4564 p = first_cpu;
4565 while (p && p != (CPUState *)cpu_env) {
4566 lastp = &p->next_cpu;
4567 p = p->next_cpu;
4568 }
4569 /* If we didn't find the CPU for this thread then something is
4570 horribly wrong. */
4571 if (!p)
4572 abort();
4573 /* Remove the CPU from the list. */
4574 *lastp = p->next_cpu;
4575 cpu_list_unlock();
4576 ts = ((CPUState *)cpu_env)->opaque;
4577 if (ts->child_tidptr) {
4578 put_user_u32(0, ts->child_tidptr);
4579 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4580 NULL, NULL, 0);
4581 }
4582 thread_env = NULL;
4583 qemu_free(cpu_env);
4584 qemu_free(ts);
4585 pthread_exit(NULL);
4586 }
4587 #endif
4588 #ifdef TARGET_GPROF
4589 _mcleanup();
4590 #endif
4591 gdb_exit(cpu_env, arg1);
4592 _exit(arg1);
4593 ret = 0; /* avoid warning */
4594 break;
4595 case TARGET_NR_read:
4596 if (arg3 == 0)
4597 ret = 0;
4598 else {
4599 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4600 goto efault;
4601 ret = get_errno(read(arg1, p, arg3));
4602 unlock_user(p, arg2, ret);
4603 }
4604 break;
4605 case TARGET_NR_write:
4606 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4607 goto efault;
4608 ret = get_errno(write(arg1, p, arg3));
4609 unlock_user(p, arg2, 0);
4610 break;
4611 case TARGET_NR_open:
4612 if (!(p = lock_user_string(arg1)))
4613 goto efault;
4614 ret = get_errno(open(path(p),
4615 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4616 arg3));
4617 unlock_user(p, arg1, 0);
4618 break;
4619 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4620 case TARGET_NR_openat:
4621 if (!(p = lock_user_string(arg2)))
4622 goto efault;
4623 ret = get_errno(sys_openat(arg1,
4624 path(p),
4625 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4626 arg4));
4627 unlock_user(p, arg2, 0);
4628 break;
4629 #endif
4630 case TARGET_NR_close:
4631 ret = get_errno(close(arg1));
4632 break;
4633 case TARGET_NR_brk:
4634 ret = do_brk(arg1);
4635 break;
4636 case TARGET_NR_fork:
4637 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4638 break;
4639 #ifdef TARGET_NR_waitpid
4640 case TARGET_NR_waitpid:
4641 {
4642 int status;
4643 ret = get_errno(waitpid(arg1, &status, arg3));
4644 if (!is_error(ret) && arg2
4645 && put_user_s32(host_to_target_waitstatus(status), arg2))
4646 goto efault;
4647 }
4648 break;
4649 #endif
4650 #ifdef TARGET_NR_waitid
4651 case TARGET_NR_waitid:
4652 {
4653 siginfo_t info;
4654 info.si_pid = 0;
4655 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4656 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4657 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4658 goto efault;
4659 host_to_target_siginfo(p, &info);
4660 unlock_user(p, arg3, sizeof(target_siginfo_t));
4661 }
4662 }
4663 break;
4664 #endif
4665 #ifdef TARGET_NR_creat /* not on alpha */
4666 case TARGET_NR_creat:
4667 if (!(p = lock_user_string(arg1)))
4668 goto efault;
4669 ret = get_errno(creat(p, arg2));
4670 unlock_user(p, arg1, 0);
4671 break;
4672 #endif
4673 case TARGET_NR_link:
4674 {
4675 void * p2;
4676 p = lock_user_string(arg1);
4677 p2 = lock_user_string(arg2);
4678 if (!p || !p2)
4679 ret = -TARGET_EFAULT;
4680 else
4681 ret = get_errno(link(p, p2));
4682 unlock_user(p2, arg2, 0);
4683 unlock_user(p, arg1, 0);
4684 }
4685 break;
4686 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4687 case TARGET_NR_linkat:
4688 {
4689 void * p2 = NULL;
4690 if (!arg2 || !arg4)
4691 goto efault;
4692 p = lock_user_string(arg2);
4693 p2 = lock_user_string(arg4);
4694 if (!p || !p2)
4695 ret = -TARGET_EFAULT;
4696 else
4697 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4698 unlock_user(p, arg2, 0);
4699 unlock_user(p2, arg4, 0);
4700 }
4701 break;
4702 #endif
4703 case TARGET_NR_unlink:
4704 if (!(p = lock_user_string(arg1)))
4705 goto efault;
4706 ret = get_errno(unlink(p));
4707 unlock_user(p, arg1, 0);
4708 break;
4709 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4710 case TARGET_NR_unlinkat:
4711 if (!(p = lock_user_string(arg2)))
4712 goto efault;
4713 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4714 unlock_user(p, arg2, 0);
4715 break;
4716 #endif
4717 case TARGET_NR_execve:
4718 {
4719 char **argp, **envp;
4720 int argc, envc;
4721 abi_ulong gp;
4722 abi_ulong guest_argp;
4723 abi_ulong guest_envp;
4724 abi_ulong addr;
4725 char **q;
4726
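/* Count the guest argv/envp entries, then lock each guest string into host
   memory so execve() can be given plain char * arrays. */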
4727 argc = 0;
4728 guest_argp = arg2;
4729 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4730 if (get_user_ual(addr, gp))
4731 goto efault;
4732 if (!addr)
4733 break;
4734 argc++;
4735 }
4736 envc = 0;
4737 guest_envp = arg3;
4738 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4739 if (get_user_ual(addr, gp))
4740 goto efault;
4741 if (!addr)
4742 break;
4743 envc++;
4744 }
4745
4746 argp = alloca((argc + 1) * sizeof(void *));
4747 envp = alloca((envc + 1) * sizeof(void *));
4748
4749 for (gp = guest_argp, q = argp; gp;
4750 gp += sizeof(abi_ulong), q++) {
4751 if (get_user_ual(addr, gp))
4752 goto execve_efault;
4753 if (!addr)
4754 break;
4755 if (!(*q = lock_user_string(addr)))
4756 goto execve_efault;
4757 }
4758 *q = NULL;
4759
4760 for (gp = guest_envp, q = envp; gp;
4761 gp += sizeof(abi_ulong), q++) {
4762 if (get_user_ual(addr, gp))
4763 goto execve_efault;
4764 if (!addr)
4765 break;
4766 if (!(*q = lock_user_string(addr)))
4767 goto execve_efault;
4768 }
4769 *q = NULL;
4770
4771 if (!(p = lock_user_string(arg1)))
4772 goto execve_efault;
4773 ret = get_errno(execve(p, argp, envp));
4774 unlock_user(p, arg1, 0);
4775
4776 goto execve_end;
4777
4778 execve_efault:
4779 ret = -TARGET_EFAULT;
4780
4781 execve_end:
4782 for (gp = guest_argp, q = argp; *q;
4783 gp += sizeof(abi_ulong), q++) {
4784 if (get_user_ual(addr, gp)
4785 || !addr)
4786 break;
4787 unlock_user(*q, addr, 0);
4788 }
4789 for (gp = guest_envp, q = envp; *q;
4790 gp += sizeof(abi_ulong), q++) {
4791 if (get_user_ual(addr, gp)
4792 || !addr)
4793 break;
4794 unlock_user(*q, addr, 0);
4795 }
4796 }
4797 break;
4798 case TARGET_NR_chdir:
4799 if (!(p = lock_user_string(arg1)))
4800 goto efault;
4801 ret = get_errno(chdir(p));
4802 unlock_user(p, arg1, 0);
4803 break;
4804 #ifdef TARGET_NR_time
4805 case TARGET_NR_time:
4806 {
4807 time_t host_time;
4808 ret = get_errno(time(&host_time));
4809 if (!is_error(ret)
4810 && arg1
4811 && put_user_sal(host_time, arg1))
4812 goto efault;
4813 }
4814 break;
4815 #endif
4816 case TARGET_NR_mknod:
4817 if (!(p = lock_user_string(arg1)))
4818 goto efault;
4819 ret = get_errno(mknod(p, arg2, arg3));
4820 unlock_user(p, arg1, 0);
4821 break;
4822 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4823 case TARGET_NR_mknodat:
4824 if (!(p = lock_user_string(arg2)))
4825 goto efault;
4826 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4827 unlock_user(p, arg2, 0);
4828 break;
4829 #endif
4830 case TARGET_NR_chmod:
4831 if (!(p = lock_user_string(arg1)))
4832 goto efault;
4833 ret = get_errno(chmod(p, arg2));
4834 unlock_user(p, arg1, 0);
4835 break;
4836 #ifdef TARGET_NR_break
4837 case TARGET_NR_break:
4838 goto unimplemented;
4839 #endif
4840 #ifdef TARGET_NR_oldstat
4841 case TARGET_NR_oldstat:
4842 goto unimplemented;
4843 #endif
4844 case TARGET_NR_lseek:
4845 ret = get_errno(lseek(arg1, arg2, arg3));
4846 break;
4847 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4848 /* Alpha specific */
4849 case TARGET_NR_getxpid:
4850 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
4851 ret = get_errno(getpid());
4852 break;
4853 #endif
4854 #ifdef TARGET_NR_getpid
4855 case TARGET_NR_getpid:
4856 ret = get_errno(getpid());
4857 break;
4858 #endif
4859 case TARGET_NR_mount:
4860 {
4861 /* need to look at the data field */
4862 void *p2, *p3;
4863 p = lock_user_string(arg1);
4864 p2 = lock_user_string(arg2);
4865 p3 = lock_user_string(arg3);
4866 if (!p || !p2 || !p3)
4867 ret = -TARGET_EFAULT;
4868 else {
4869 /* FIXME - arg5 should be locked, but it isn't clear how to
4870 * do that since it's not guaranteed to be a NULL-terminated
4871 * string.
4872 */
4873 if (!arg5)
4874 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
4875 else
4876 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4877 }
4878 unlock_user(p, arg1, 0);
4879 unlock_user(p2, arg2, 0);
4880 unlock_user(p3, arg3, 0);
4881 break;
4882 }
4883 #ifdef TARGET_NR_umount
4884 case TARGET_NR_umount:
4885 if (!(p = lock_user_string(arg1)))
4886 goto efault;
4887 ret = get_errno(umount(p));
4888 unlock_user(p, arg1, 0);
4889 break;
4890 #endif
4891 #ifdef TARGET_NR_stime /* not on alpha */
4892 case TARGET_NR_stime:
4893 {
4894 time_t host_time;
4895 if (get_user_sal(host_time, arg1))
4896 goto efault;
4897 ret = get_errno(stime(&host_time));
4898 }
4899 break;
4900 #endif
4901 case TARGET_NR_ptrace:
4902 goto unimplemented;
4903 #ifdef TARGET_NR_alarm /* not on alpha */
4904 case TARGET_NR_alarm:
4905 ret = alarm(arg1);
4906 break;
4907 #endif
4908 #ifdef TARGET_NR_oldfstat
4909 case TARGET_NR_oldfstat:
4910 goto unimplemented;
4911 #endif
4912 #ifdef TARGET_NR_pause /* not on alpha */
4913 case TARGET_NR_pause:
4914 ret = get_errno(pause());
4915 break;
4916 #endif
4917 #ifdef TARGET_NR_utime
4918 case TARGET_NR_utime:
4919 {
4920 struct utimbuf tbuf, *host_tbuf;
4921 struct target_utimbuf *target_tbuf;
4922 if (arg2) {
4923 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4924 goto efault;
4925 tbuf.actime = tswapl(target_tbuf->actime);
4926 tbuf.modtime = tswapl(target_tbuf->modtime);
4927 unlock_user_struct(target_tbuf, arg2, 0);
4928 host_tbuf = &tbuf;
4929 } else {
4930 host_tbuf = NULL;
4931 }
4932 if (!(p = lock_user_string(arg1)))
4933 goto efault;
4934 ret = get_errno(utime(p, host_tbuf));
4935 unlock_user(p, arg1, 0);
4936 }
4937 break;
4938 #endif
4939 case TARGET_NR_utimes:
4940 {
4941 struct timeval *tvp, tv[2];
4942 if (arg2) {
4943 if (copy_from_user_timeval(&tv[0], arg2)
4944 || copy_from_user_timeval(&tv[1],
4945 arg2 + sizeof(struct target_timeval)))
4946 goto efault;
4947 tvp = tv;
4948 } else {
4949 tvp = NULL;
4950 }
4951 if (!(p = lock_user_string(arg1)))
4952 goto efault;
4953 ret = get_errno(utimes(p, tvp));
4954 unlock_user(p, arg1, 0);
4955 }
4956 break;
4957 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4958 case TARGET_NR_futimesat:
4959 {
4960 struct timeval *tvp, tv[2];
4961 if (arg3) {
4962 if (copy_from_user_timeval(&tv[0], arg3)
4963 || copy_from_user_timeval(&tv[1],
4964 arg3 + sizeof(struct target_timeval)))
4965 goto efault;
4966 tvp = tv;
4967 } else {
4968 tvp = NULL;
4969 }
4970 if (!(p = lock_user_string(arg2)))
4971 goto efault;
4972 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4973 unlock_user(p, arg2, 0);
4974 }
4975 break;
4976 #endif
4977 #ifdef TARGET_NR_stty
4978 case TARGET_NR_stty:
4979 goto unimplemented;
4980 #endif
4981 #ifdef TARGET_NR_gtty
4982 case TARGET_NR_gtty:
4983 goto unimplemented;
4984 #endif
4985 case TARGET_NR_access:
4986 if (!(p = lock_user_string(arg1)))
4987 goto efault;
4988 ret = get_errno(access(path(p), arg2));
4989 unlock_user(p, arg1, 0);
4990 break;
4991 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4992 case TARGET_NR_faccessat:
4993 if (!(p = lock_user_string(arg2)))
4994 goto efault;
4995 ret = get_errno(sys_faccessat(arg1, p, arg3));
4996 unlock_user(p, arg2, 0);
4997 break;
4998 #endif
4999 #ifdef TARGET_NR_nice /* not on alpha */
5000 case TARGET_NR_nice:
5001 ret = get_errno(nice(arg1));
5002 break;
5003 #endif
5004 #ifdef TARGET_NR_ftime
5005 case TARGET_NR_ftime:
5006 goto unimplemented;
5007 #endif
5008 case TARGET_NR_sync:
5009 sync();
5010 ret = 0;
5011 break;
5012 case TARGET_NR_kill:
5013 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5014 break;
5015 case TARGET_NR_rename:
5016 {
5017 void *p2;
5018 p = lock_user_string(arg1);
5019 p2 = lock_user_string(arg2);
5020 if (!p || !p2)
5021 ret = -TARGET_EFAULT;
5022 else
5023 ret = get_errno(rename(p, p2));
5024 unlock_user(p2, arg2, 0);
5025 unlock_user(p, arg1, 0);
5026 }
5027 break;
5028 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5029 case TARGET_NR_renameat:
5030 {
5031 void *p2;
5032 p = lock_user_string(arg2);
5033 p2 = lock_user_string(arg4);
5034 if (!p || !p2)
5035 ret = -TARGET_EFAULT;
5036 else
5037 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5038 unlock_user(p2, arg4, 0);
5039 unlock_user(p, arg2, 0);
5040 }
5041 break;
5042 #endif
5043 case TARGET_NR_mkdir:
5044 if (!(p = lock_user_string(arg1)))
5045 goto efault;
5046 ret = get_errno(mkdir(p, arg2));
5047 unlock_user(p, arg1, 0);
5048 break;
5049 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5050 case TARGET_NR_mkdirat:
5051 if (!(p = lock_user_string(arg2)))
5052 goto efault;
5053 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5054 unlock_user(p, arg2, 0);
5055 break;
5056 #endif
5057 case TARGET_NR_rmdir:
5058 if (!(p = lock_user_string(arg1)))
5059 goto efault;
5060 ret = get_errno(rmdir(p));
5061 unlock_user(p, arg1, 0);
5062 break;
5063 case TARGET_NR_dup:
5064 ret = get_errno(dup(arg1));
5065 break;
5066 case TARGET_NR_pipe:
5067 ret = do_pipe(cpu_env, arg1, 0, 0);
5068 break;
5069 #ifdef TARGET_NR_pipe2
5070 case TARGET_NR_pipe2:
5071 ret = do_pipe(cpu_env, arg1, arg2, 1);
5072 break;
5073 #endif
5074 case TARGET_NR_times:
5075 {
5076 struct target_tms *tmsp;
5077 struct tms tms;
5078 ret = get_errno(times(&tms));
5079 if (arg1) {
5080 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5081 if (!tmsp)
5082 goto efault;
5083 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
5084 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
5085 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
5086 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
5087 }
5088 if (!is_error(ret))
5089 ret = host_to_target_clock_t(ret);
5090 }
5091 break;
5092 #ifdef TARGET_NR_prof
5093 case TARGET_NR_prof:
5094 goto unimplemented;
5095 #endif
5096 #ifdef TARGET_NR_signal
5097 case TARGET_NR_signal:
5098 goto unimplemented;
5099 #endif
5100 case TARGET_NR_acct:
5101 if (arg1 == 0) {
5102 ret = get_errno(acct(NULL));
5103 } else {
5104 if (!(p = lock_user_string(arg1)))
5105 goto efault;
5106 ret = get_errno(acct(path(p)));
5107 unlock_user(p, arg1, 0);
5108 }
5109 break;
5110 #ifdef TARGET_NR_umount2 /* not on alpha */
5111 case TARGET_NR_umount2:
5112 if (!(p = lock_user_string(arg1)))
5113 goto efault;
5114 ret = get_errno(umount2(p, arg2));
5115 unlock_user(p, arg1, 0);
5116 break;
5117 #endif
5118 #ifdef TARGET_NR_lock
5119 case TARGET_NR_lock:
5120 goto unimplemented;
5121 #endif
5122 case TARGET_NR_ioctl:
5123 ret = do_ioctl(arg1, arg2, arg3);
5124 break;
5125 case TARGET_NR_fcntl:
5126 ret = do_fcntl(arg1, arg2, arg3);
5127 break;
5128 #ifdef TARGET_NR_mpx
5129 case TARGET_NR_mpx:
5130 goto unimplemented;
5131 #endif
5132 case TARGET_NR_setpgid:
5133 ret = get_errno(setpgid(arg1, arg2));
5134 break;
5135 #ifdef TARGET_NR_ulimit
5136 case TARGET_NR_ulimit:
5137 goto unimplemented;
5138 #endif
5139 #ifdef TARGET_NR_oldolduname
5140 case TARGET_NR_oldolduname:
5141 goto unimplemented;
5142 #endif
5143 case TARGET_NR_umask:
5144 ret = get_errno(umask(arg1));
5145 break;
5146 case TARGET_NR_chroot:
5147 if (!(p = lock_user_string(arg1)))
5148 goto efault;
5149 ret = get_errno(chroot(p));
5150 unlock_user(p, arg1, 0);
5151 break;
5152 case TARGET_NR_ustat:
5153 goto unimplemented;
5154 case TARGET_NR_dup2:
5155 ret = get_errno(dup2(arg1, arg2));
5156 break;
5157 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5158 case TARGET_NR_dup3:
5159 ret = get_errno(dup3(arg1, arg2, arg3));
5160 break;
5161 #endif
5162 #ifdef TARGET_NR_getppid /* not on alpha */
5163 case TARGET_NR_getppid:
5164 ret = get_errno(getppid());
5165 break;
5166 #endif
5167 case TARGET_NR_getpgrp:
5168 ret = get_errno(getpgrp());
5169 break;
5170 case TARGET_NR_setsid:
5171 ret = get_errno(setsid());
5172 break;
5173 #ifdef TARGET_NR_sigaction
5174 case TARGET_NR_sigaction:
5175 {
5176 #if defined(TARGET_ALPHA)
5177 struct target_sigaction act, oact, *pact = 0;
5178 struct target_old_sigaction *old_act;
5179 if (arg2) {
5180 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5181 goto efault;
5182 act._sa_handler = old_act->_sa_handler;
5183 target_siginitset(&act.sa_mask, old_act->sa_mask);
5184 act.sa_flags = old_act->sa_flags;
5185 act.sa_restorer = 0;
5186 unlock_user_struct(old_act, arg2, 0);
5187 pact = &act;
5188 }
5189 ret = get_errno(do_sigaction(arg1, pact, &oact));
5190 if (!is_error(ret) && arg3) {
5191 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5192 goto efault;
5193 old_act->_sa_handler = oact._sa_handler;
5194 old_act->sa_mask = oact.sa_mask.sig[0];
5195 old_act->sa_flags = oact.sa_flags;
5196 unlock_user_struct(old_act, arg3, 1);
5197 }
5198 #elif defined(TARGET_MIPS)
5199 struct target_sigaction act, oact, *pact, *old_act;
5200
5201 if (arg2) {
5202 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5203 goto efault;
5204 act._sa_handler = old_act->_sa_handler;
5205 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5206 act.sa_flags = old_act->sa_flags;
5207 unlock_user_struct(old_act, arg2, 0);
5208 pact = &act;
5209 } else {
5210 pact = NULL;
5211 }
5212
5213 ret = get_errno(do_sigaction(arg1, pact, &oact));
5214
5215 if (!is_error(ret) && arg3) {
5216 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5217 goto efault;
5218 old_act->_sa_handler = oact._sa_handler;
5219 old_act->sa_flags = oact.sa_flags;
5220 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5221 old_act->sa_mask.sig[1] = 0;
5222 old_act->sa_mask.sig[2] = 0;
5223 old_act->sa_mask.sig[3] = 0;
5224 unlock_user_struct(old_act, arg3, 1);
5225 }
5226 #else
5227 struct target_old_sigaction *old_act;
5228 struct target_sigaction act, oact, *pact;
5229 if (arg2) {
5230 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5231 goto efault;
5232 act._sa_handler = old_act->_sa_handler;
5233 target_siginitset(&act.sa_mask, old_act->sa_mask);
5234 act.sa_flags = old_act->sa_flags;
5235 act.sa_restorer = old_act->sa_restorer;
5236 unlock_user_struct(old_act, arg2, 0);
5237 pact = &act;
5238 } else {
5239 pact = NULL;
5240 }
5241 ret = get_errno(do_sigaction(arg1, pact, &oact));
5242 if (!is_error(ret) && arg3) {
5243 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5244 goto efault;
5245 old_act->_sa_handler = oact._sa_handler;
5246 old_act->sa_mask = oact.sa_mask.sig[0];
5247 old_act->sa_flags = oact.sa_flags;
5248 old_act->sa_restorer = oact.sa_restorer;
5249 unlock_user_struct(old_act, arg3, 1);
5250 }
5251 #endif
5252 }
5253 break;
5254 #endif
5255 case TARGET_NR_rt_sigaction:
5256 {
5257 #if defined(TARGET_ALPHA)
5258 struct target_sigaction act, oact, *pact = 0;
5259 struct target_rt_sigaction *rt_act;
5260 /* ??? arg4 == sizeof(sigset_t). */
5261 if (arg2) {
5262 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5263 goto efault;
5264 act._sa_handler = rt_act->_sa_handler;
5265 act.sa_mask = rt_act->sa_mask;
5266 act.sa_flags = rt_act->sa_flags;
5267 act.sa_restorer = arg5;
5268 unlock_user_struct(rt_act, arg2, 0);
5269 pact = &act;
5270 }
5271 ret = get_errno(do_sigaction(arg1, pact, &oact));
5272 if (!is_error(ret) && arg3) {
5273 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5274 goto efault;
5275 rt_act->_sa_handler = oact._sa_handler;
5276 rt_act->sa_mask = oact.sa_mask;
5277 rt_act->sa_flags = oact.sa_flags;
5278 unlock_user_struct(rt_act, arg3, 1);
5279 }
5280 #else
5281 struct target_sigaction *act;
5282 struct target_sigaction *oact;
5283
5284 if (arg2) {
5285 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5286 goto efault;
5287 } else
5288 act = NULL;
5289 if (arg3) {
5290 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5291 ret = -TARGET_EFAULT;
5292 goto rt_sigaction_fail;
5293 }
5294 } else
5295 oact = NULL;
5296 ret = get_errno(do_sigaction(arg1, act, oact));
5297 rt_sigaction_fail:
5298 if (act)
5299 unlock_user_struct(act, arg2, 0);
5300 if (oact)
5301 unlock_user_struct(oact, arg3, 1);
5302 #endif
5303 }
5304 break;
5305 #ifdef TARGET_NR_sgetmask /* not on alpha */
5306 case TARGET_NR_sgetmask:
5307 {
5308 sigset_t cur_set;
5309 abi_ulong target_set;
5310 sigprocmask(0, NULL, &cur_set);
5311 host_to_target_old_sigset(&target_set, &cur_set);
5312 ret = target_set;
5313 }
5314 break;
5315 #endif
5316 #ifdef TARGET_NR_ssetmask /* not on alpha */
5317 case TARGET_NR_ssetmask:
5318 {
5319 sigset_t set, oset, cur_set;
5320 abi_ulong target_set = arg1;
5321 sigprocmask(0, NULL, &cur_set);
5322 target_to_host_old_sigset(&set, &target_set);
5323 sigorset(&set, &set, &cur_set);
5324 sigprocmask(SIG_SETMASK, &set, &oset);
5325 host_to_target_old_sigset(&target_set, &oset);
5326 ret = target_set;
5327 }
5328 break;
5329 #endif
5330 #ifdef TARGET_NR_sigprocmask
5331 case TARGET_NR_sigprocmask:
5332 {
5333 #if defined(TARGET_ALPHA)
5334 sigset_t set, oldset;
5335 abi_ulong mask;
5336 int how;
5337
5338 switch (arg1) {
5339 case TARGET_SIG_BLOCK:
5340 how = SIG_BLOCK;
5341 break;
5342 case TARGET_SIG_UNBLOCK:
5343 how = SIG_UNBLOCK;
5344 break;
5345 case TARGET_SIG_SETMASK:
5346 how = SIG_SETMASK;
5347 break;
5348 default:
5349 ret = -TARGET_EINVAL;
5350 goto fail;
5351 }
5352 mask = arg2;
5353 target_to_host_old_sigset(&set, &mask);
5354
5355 ret = get_errno(sigprocmask(how, &set, &oldset));
5356
5357 if (!is_error(ret)) {
5358 host_to_target_old_sigset(&mask, &oldset);
5359 ret = mask;
5360 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5361 }
5362 #else
5363 sigset_t set, oldset, *set_ptr;
5364 int how;
5365
5366 if (arg2) {
5367 switch (arg1) {
5368 case TARGET_SIG_BLOCK:
5369 how = SIG_BLOCK;
5370 break;
5371 case TARGET_SIG_UNBLOCK:
5372 how = SIG_UNBLOCK;
5373 break;
5374 case TARGET_SIG_SETMASK:
5375 how = SIG_SETMASK;
5376 break;
5377 default:
5378 ret = -TARGET_EINVAL;
5379 goto fail;
5380 }
5381 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5382 goto efault;
5383 target_to_host_old_sigset(&set, p);
5384 unlock_user(p, arg2, 0);
5385 set_ptr = &set;
5386 } else {
5387 how = 0;
5388 set_ptr = NULL;
5389 }
5390 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5391 if (!is_error(ret) && arg3) {
5392 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5393 goto efault;
5394 host_to_target_old_sigset(p, &oldset);
5395 unlock_user(p, arg3, sizeof(target_sigset_t));
5396 }
5397 #endif
5398 }
5399 break;
5400 #endif
5401 case TARGET_NR_rt_sigprocmask:
5402 {
5403 int how = arg1;
5404 sigset_t set, oldset, *set_ptr;
5405
5406 if (arg2) {
5407 switch(how) {
5408 case TARGET_SIG_BLOCK:
5409 how = SIG_BLOCK;
5410 break;
5411 case TARGET_SIG_UNBLOCK:
5412 how = SIG_UNBLOCK;
5413 break;
5414 case TARGET_SIG_SETMASK:
5415 how = SIG_SETMASK;
5416 break;
5417 default:
5418 ret = -TARGET_EINVAL;
5419 goto fail;
5420 }
5421 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5422 goto efault;
5423 target_to_host_sigset(&set, p);
5424 unlock_user(p, arg2, 0);
5425 set_ptr = &set;
5426 } else {
5427 how = 0;
5428 set_ptr = NULL;
5429 }
5430 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5431 if (!is_error(ret) && arg3) {
5432 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5433 goto efault;
5434 host_to_target_sigset(p, &oldset);
5435 unlock_user(p, arg3, sizeof(target_sigset_t));
5436 }
5437 }
5438 break;
5439 #ifdef TARGET_NR_sigpending
5440 case TARGET_NR_sigpending:
5441 {
5442 sigset_t set;
5443 ret = get_errno(sigpending(&set));
5444 if (!is_error(ret)) {
5445 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5446 goto efault;
5447 host_to_target_old_sigset(p, &set);
5448 unlock_user(p, arg1, sizeof(target_sigset_t));
5449 }
5450 }
5451 break;
5452 #endif
5453 case TARGET_NR_rt_sigpending:
5454 {
5455 sigset_t set;
5456 ret = get_errno(sigpending(&set));
5457 if (!is_error(ret)) {
5458 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5459 goto efault;
5460 host_to_target_sigset(p, &set);
5461 unlock_user(p, arg1, sizeof(target_sigset_t));
5462 }
5463 }
5464 break;
5465 #ifdef TARGET_NR_sigsuspend
5466 case TARGET_NR_sigsuspend:
5467 {
5468 sigset_t set;
5469 #if defined(TARGET_ALPHA)
5470 abi_ulong mask = arg1;
5471 target_to_host_old_sigset(&set, &mask);
5472 #else
5473 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5474 goto efault;
5475 target_to_host_old_sigset(&set, p);
5476 unlock_user(p, arg1, 0);
5477 #endif
5478 ret = get_errno(sigsuspend(&set));
5479 }
5480 break;
5481 #endif
5482 case TARGET_NR_rt_sigsuspend:
5483 {
5484 sigset_t set;
5485 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5486 goto efault;
5487 target_to_host_sigset(&set, p);
5488 unlock_user(p, arg1, 0);
5489 ret = get_errno(sigsuspend(&set));
5490 }
5491 break;
5492 case TARGET_NR_rt_sigtimedwait:
5493 {
5494 sigset_t set;
5495 struct timespec uts, *puts;
5496 siginfo_t uinfo;
5497
5498 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5499 goto efault;
5500 target_to_host_sigset(&set, p);
5501 unlock_user(p, arg1, 0);
5502 if (arg3) {
5503 puts = &uts;
5504 target_to_host_timespec(puts, arg3);
5505 } else {
5506 puts = NULL;
5507 }
5508 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5509 if (!is_error(ret) && arg2) {
5510 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5511 goto efault;
5512 host_to_target_siginfo(p, &uinfo);
5513 unlock_user(p, arg2, sizeof(target_siginfo_t));
5514 }
5515 }
5516 break;
5517 case TARGET_NR_rt_sigqueueinfo:
5518 {
5519 siginfo_t uinfo;
5520 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
5521 goto efault;
5522 target_to_host_siginfo(&uinfo, p);
5523 unlock_user(p, arg1, 0);
5524 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5525 }
5526 break;
5527 #ifdef TARGET_NR_sigreturn
5528 case TARGET_NR_sigreturn:
5529 /* NOTE: ret is eax, so no transcoding is needed */
5530 ret = do_sigreturn(cpu_env);
5531 break;
5532 #endif
5533 case TARGET_NR_rt_sigreturn:
5534 /* NOTE: ret is eax, so no transcoding is needed */
5535 ret = do_rt_sigreturn(cpu_env);
5536 break;
5537 case TARGET_NR_sethostname:
5538 if (!(p = lock_user_string(arg1)))
5539 goto efault;
5540 ret = get_errno(sethostname(p, arg2));
5541 unlock_user(p, arg1, 0);
5542 break;
5543 case TARGET_NR_setrlimit:
5544 {
5545 int resource = arg1;
5546 struct target_rlimit *target_rlim;
5547 struct rlimit rlim;
5548 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5549 goto efault;
5550 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5551 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5552 unlock_user_struct(target_rlim, arg2, 0);
5553 ret = get_errno(setrlimit(resource, &rlim));
5554 }
5555 break;
5556 case TARGET_NR_getrlimit:
5557 {
5558 int resource = arg1;
5559 struct target_rlimit *target_rlim;
5560 struct rlimit rlim;
5561
5562 ret = get_errno(getrlimit(resource, &rlim));
5563 if (!is_error(ret)) {
5564 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5565 goto efault;
5566 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5567 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5568 unlock_user_struct(target_rlim, arg2, 1);
5569 }
5570 }
5571 break;
5572 case TARGET_NR_getrusage:
5573 {
5574 struct rusage rusage;
5575 ret = get_errno(getrusage(arg1, &rusage));
5576 if (!is_error(ret)) {
5577 host_to_target_rusage(arg2, &rusage);
5578 }
5579 }
5580 break;
5581 case TARGET_NR_gettimeofday:
5582 {
5583 struct timeval tv;
5584 ret = get_errno(gettimeofday(&tv, NULL));
5585 if (!is_error(ret)) {
5586 if (copy_to_user_timeval(arg1, &tv))
5587 goto efault;
5588 }
5589 }
5590 break;
5591 case TARGET_NR_settimeofday:
5592 {
5593 struct timeval tv;
5594 if (copy_from_user_timeval(&tv, arg1))
5595 goto efault;
5596 ret = get_errno(settimeofday(&tv, NULL));
5597 }
5598 break;
5599 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5600 case TARGET_NR_select:
5601 {
5602 struct target_sel_arg_struct *sel;
5603 abi_ulong inp, outp, exp, tvp;
5604 long nsel;
5605
5606 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5607 goto efault;
5608 nsel = tswapl(sel->n);
5609 inp = tswapl(sel->inp);
5610 outp = tswapl(sel->outp);
5611 exp = tswapl(sel->exp);
5612 tvp = tswapl(sel->tvp);
5613 unlock_user_struct(sel, arg1, 0);
5614 ret = do_select(nsel, inp, outp, exp, tvp);
5615 }
5616 break;
5617 #endif
5618 #ifdef TARGET_NR_pselect6
5619 case TARGET_NR_pselect6:
5620 {
5621 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
5622 fd_set rfds, wfds, efds;
5623 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
5624 struct timespec ts, *ts_ptr;
5625
5626 /*
5627 * The 6th arg is actually two args smashed together,
5628 * so we cannot use the C library.
5629 */
5630 sigset_t set;
5631 struct {
5632 sigset_t *set;
5633 size_t size;
5634 } sig, *sig_ptr;
5635
5636 abi_ulong arg_sigset, arg_sigsize, *arg7;
5637 target_sigset_t *target_sigset;
5638
5639 n = arg1;
5640 rfd_addr = arg2;
5641 wfd_addr = arg3;
5642 efd_addr = arg4;
5643 ts_addr = arg5;
5644
5645 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
5646 if (ret) {
5647 goto fail;
5648 }
5649 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
5650 if (ret) {
5651 goto fail;
5652 }
5653 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
5654 if (ret) {
5655 goto fail;
5656 }
5657
5658 /*
5659 * This takes a timespec, and not a timeval, so we cannot
5660 * use the do_select() helper ...
5661 */
5662 if (ts_addr) {
5663 if (target_to_host_timespec(&ts, ts_addr)) {
5664 goto efault;
5665 }
5666 ts_ptr = &ts;
5667 } else {
5668 ts_ptr = NULL;
5669 }
5670
5671 /* Extract the two packed args for the sigset */
5672 if (arg6) {
5673 sig_ptr = &sig;
5674 sig.size = _NSIG / 8;
5675
5676 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
5677 if (!arg7) {
5678 goto efault;
5679 }
5680 arg_sigset = tswapl(arg7[0]);
5681 arg_sigsize = tswapl(arg7[1]);
5682 unlock_user(arg7, arg6, 0);
5683
5684 if (arg_sigset) {
5685 sig.set = &set;
5686 target_sigset = lock_user(VERIFY_READ, arg_sigset,
5687 sizeof(*target_sigset), 1);
5688 if (!target_sigset) {
5689 goto efault;
5690 }
5691 target_to_host_sigset(&set, target_sigset);
5692 unlock_user(target_sigset, arg_sigset, 0);
5693 } else {
5694 sig.set = NULL;
5695 }
5696 } else {
5697 sig_ptr = NULL;
5698 }
5699
5700 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
5701 ts_ptr, sig_ptr));
5702
5703 if (!is_error(ret)) {
5704 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
5705 goto efault;
5706 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
5707 goto efault;
5708 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
5709 goto efault;
5710
5711 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
5712 goto efault;
5713 }
5714 }
5715 break;
5716 #endif
5717 case TARGET_NR_symlink:
5718 {
5719 void *p2;
5720 p = lock_user_string(arg1);
5721 p2 = lock_user_string(arg2);
5722 if (!p || !p2)
5723 ret = -TARGET_EFAULT;
5724 else
5725 ret = get_errno(symlink(p, p2));
5726 unlock_user(p2, arg2, 0);
5727 unlock_user(p, arg1, 0);
5728 }
5729 break;
5730 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5731 case TARGET_NR_symlinkat:
5732 {
5733 void *p2;
5734 p = lock_user_string(arg1);
5735 p2 = lock_user_string(arg3);
5736 if (!p || !p2)
5737 ret = -TARGET_EFAULT;
5738 else
5739 ret = get_errno(sys_symlinkat(p, arg2, p2));
5740 unlock_user(p2, arg3, 0);
5741 unlock_user(p, arg1, 0);
5742 }
5743 break;
5744 #endif
5745 #ifdef TARGET_NR_oldlstat
5746 case TARGET_NR_oldlstat:
5747 goto unimplemented;
5748 #endif
5749 case TARGET_NR_readlink:
5750 {
5751 void *p2, *temp;
5752 p = lock_user_string(arg1);
5753 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5754 if (!p || !p2)
5755 ret = -TARGET_EFAULT;
5756 else {
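/* Resolve /proc/self/exe to the emulated binary (exec_path) rather than
   to the qemu executable itself. */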
5757 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5758 char real[PATH_MAX];
5759 temp = realpath(exec_path,real);
5760 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
5761 snprintf((char *)p2, arg3, "%s", real);
5762 }
5763 else
5764 ret = get_errno(readlink(path(p), p2, arg3));
5765 }
5766 unlock_user(p2, arg2, ret);
5767 unlock_user(p, arg1, 0);
5768 }
5769 break;
5770 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5771 case TARGET_NR_readlinkat:
5772 {
5773 void *p2;
5774 p = lock_user_string(arg2);
5775 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5776 if (!p || !p2)
5777 ret = -TARGET_EFAULT;
5778 else
5779 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5780 unlock_user(p2, arg3, ret);
5781 unlock_user(p, arg2, 0);
5782 }
5783 break;
5784 #endif
5785 #ifdef TARGET_NR_uselib
5786 case TARGET_NR_uselib:
5787 goto unimplemented;
5788 #endif
5789 #ifdef TARGET_NR_swapon
5790 case TARGET_NR_swapon:
5791 if (!(p = lock_user_string(arg1)))
5792 goto efault;
5793 ret = get_errno(swapon(p, arg2));
5794 unlock_user(p, arg1, 0);
5795 break;
5796 #endif
5797 case TARGET_NR_reboot:
5798 goto unimplemented;
5799 #ifdef TARGET_NR_readdir
5800 case TARGET_NR_readdir:
5801 goto unimplemented;
5802 #endif
5803 #ifdef TARGET_NR_mmap
5804 case TARGET_NR_mmap:
5805 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5806 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5807 || defined(TARGET_S390X)
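/* On these targets the old mmap syscall passes its six arguments via a
   block of abi_ulongs in guest memory rather than in registers. */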
5808 {
5809 abi_ulong *v;
5810 abi_ulong v1, v2, v3, v4, v5, v6;
5811 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5812 goto efault;
5813 v1 = tswapl(v[0]);
5814 v2 = tswapl(v[1]);
5815 v3 = tswapl(v[2]);
5816 v4 = tswapl(v[3]);
5817 v5 = tswapl(v[4]);
5818 v6 = tswapl(v[5]);
5819 unlock_user(v, arg1, 0);
5820 ret = get_errno(target_mmap(v1, v2, v3,
5821 target_to_host_bitmask(v4, mmap_flags_tbl),
5822 v5, v6));
5823 }
5824 #else
5825 ret = get_errno(target_mmap(arg1, arg2, arg3,
5826 target_to_host_bitmask(arg4, mmap_flags_tbl),
5827 arg5,
5828 arg6));
5829 #endif
5830 break;
5831 #endif
5832 #ifdef TARGET_NR_mmap2
5833 case TARGET_NR_mmap2:
5834 #ifndef MMAP_SHIFT
5835 #define MMAP_SHIFT 12
5836 #endif
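/* mmap2 takes the file offset in (1 << MMAP_SHIFT)-byte units (4096 by
   default), so shift it into a byte offset for target_mmap(). */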
5837 ret = get_errno(target_mmap(arg1, arg2, arg3,
5838 target_to_host_bitmask(arg4, mmap_flags_tbl),
5839 arg5,
5840 arg6 << MMAP_SHIFT));
5841 break;
5842 #endif
5843 case TARGET_NR_munmap:
5844 ret = get_errno(target_munmap(arg1, arg2));
5845 break;
5846 case TARGET_NR_mprotect:
5847 {
5848 TaskState *ts = ((CPUState *)cpu_env)->opaque;
5849 /* Special hack to detect libc making the stack executable. */
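/* PROT_GROWSDOWN asks for the change to extend down to the bottom of the
   stack mapping; emulate that by widening the range to the recorded stack
   limit and dropping the flag before calling target_mprotect(). */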
5850 if ((arg3 & PROT_GROWSDOWN)
5851 && arg1 >= ts->info->stack_limit
5852 && arg1 <= ts->info->start_stack) {
5853 arg3 &= ~PROT_GROWSDOWN;
5854 arg2 = arg2 + arg1 - ts->info->stack_limit;
5855 arg1 = ts->info->stack_limit;
5856 }
5857 }
5858 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5859 break;
5860 #ifdef TARGET_NR_mremap
5861 case TARGET_NR_mremap:
5862 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5863 break;
5864 #endif
5865 /* ??? msync/mlock/munlock are broken for softmmu. */
5866 #ifdef TARGET_NR_msync
5867 case TARGET_NR_msync:
5868 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5869 break;
5870 #endif
5871 #ifdef TARGET_NR_mlock
5872 case TARGET_NR_mlock:
5873 ret = get_errno(mlock(g2h(arg1), arg2));
5874 break;
5875 #endif
5876 #ifdef TARGET_NR_munlock
5877 case TARGET_NR_munlock:
5878 ret = get_errno(munlock(g2h(arg1), arg2));
5879 break;
5880 #endif
5881 #ifdef TARGET_NR_mlockall
5882 case TARGET_NR_mlockall:
5883 ret = get_errno(mlockall(arg1));
5884 break;
5885 #endif
5886 #ifdef TARGET_NR_munlockall
5887 case TARGET_NR_munlockall:
5888 ret = get_errno(munlockall());
5889 break;
5890 #endif
5891 case TARGET_NR_truncate:
5892 if (!(p = lock_user_string(arg1)))
5893 goto efault;
5894 ret = get_errno(truncate(p, arg2));
5895 unlock_user(p, arg1, 0);
5896 break;
5897 case TARGET_NR_ftruncate:
5898 ret = get_errno(ftruncate(arg1, arg2));
5899 break;
5900 case TARGET_NR_fchmod:
5901 ret = get_errno(fchmod(arg1, arg2));
5902 break;
5903 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5904 case TARGET_NR_fchmodat:
5905 if (!(p = lock_user_string(arg2)))
5906 goto efault;
5907 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5908 unlock_user(p, arg2, 0);
5909 break;
5910 #endif
5911 case TARGET_NR_getpriority:
5912 /* libc does special remapping of the return value of
5913 * sys_getpriority() so it's just easiest to call
5914 * sys_getpriority() directly rather than through libc. */
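/* (The raw syscall returns 20 - nice, which is never negative, so the
   result cannot be mistaken for an error code; the guest's libc undoes
   that remapping itself.) */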
5915 ret = get_errno(sys_getpriority(arg1, arg2));
5916 break;
5917 case TARGET_NR_setpriority:
5918 ret = get_errno(setpriority(arg1, arg2, arg3));
5919 break;
5920 #ifdef TARGET_NR_profil
5921 case TARGET_NR_profil:
5922 goto unimplemented;
5923 #endif
5924 case TARGET_NR_statfs:
5925 if (!(p = lock_user_string(arg1)))
5926 goto efault;
5927 ret = get_errno(statfs(path(p), &stfs));
5928 unlock_user(p, arg1, 0);
5929 convert_statfs:
5930 if (!is_error(ret)) {
5931 struct target_statfs *target_stfs;
5932
5933 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5934 goto efault;
5935 __put_user(stfs.f_type, &target_stfs->f_type);
5936 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5937 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5938 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5939 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5940 __put_user(stfs.f_files, &target_stfs->f_files);
5941 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5942 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5943 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5944 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5945 unlock_user_struct(target_stfs, arg2, 1);
5946 }
5947 break;
5948 case TARGET_NR_fstatfs:
5949 ret = get_errno(fstatfs(arg1, &stfs));
5950 goto convert_statfs;
5951 #ifdef TARGET_NR_statfs64
5952 case TARGET_NR_statfs64:
5953 if (!(p = lock_user_string(arg1)))
5954 goto efault;
5955 ret = get_errno(statfs(path(p), &stfs));
5956 unlock_user(p, arg1, 0);
5957 convert_statfs64:
5958 if (!is_error(ret)) {
5959 struct target_statfs64 *target_stfs;
5960
5961 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5962 goto efault;
5963 __put_user(stfs.f_type, &target_stfs->f_type);
5964 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5965 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5966 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5967 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5968 __put_user(stfs.f_files, &target_stfs->f_files);
5969 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5970 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5971 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5972 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5973 unlock_user_struct(target_stfs, arg3, 1);
5974 }
5975 break;
5976 case TARGET_NR_fstatfs64:
5977 ret = get_errno(fstatfs(arg1, &stfs));
5978 goto convert_statfs64;
5979 #endif
5980 #ifdef TARGET_NR_ioperm
5981 case TARGET_NR_ioperm:
5982 goto unimplemented;
5983 #endif
5984 #ifdef TARGET_NR_socketcall
5985 case TARGET_NR_socketcall:
5986 ret = do_socketcall(arg1, arg2);
5987 break;
5988 #endif
5989 #ifdef TARGET_NR_accept
5990 case TARGET_NR_accept:
5991 ret = do_accept(arg1, arg2, arg3);
5992 break;
5993 #endif
5994 #ifdef TARGET_NR_bind
5995 case TARGET_NR_bind:
5996 ret = do_bind(arg1, arg2, arg3);
5997 break;
5998 #endif
5999 #ifdef TARGET_NR_connect
6000 case TARGET_NR_connect:
6001 ret = do_connect(arg1, arg2, arg3);
6002 break;
6003 #endif
6004 #ifdef TARGET_NR_getpeername
6005 case TARGET_NR_getpeername:
6006 ret = do_getpeername(arg1, arg2, arg3);
6007 break;
6008 #endif
6009 #ifdef TARGET_NR_getsockname
6010 case TARGET_NR_getsockname:
6011 ret = do_getsockname(arg1, arg2, arg3);
6012 break;
6013 #endif
6014 #ifdef TARGET_NR_getsockopt
6015 case TARGET_NR_getsockopt:
6016 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6017 break;
6018 #endif
6019 #ifdef TARGET_NR_listen
6020 case TARGET_NR_listen:
6021 ret = get_errno(listen(arg1, arg2));
6022 break;
6023 #endif
6024 #ifdef TARGET_NR_recv
6025 case TARGET_NR_recv:
6026 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6027 break;
6028 #endif
6029 #ifdef TARGET_NR_recvfrom
6030 case TARGET_NR_recvfrom:
6031 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6032 break;
6033 #endif
6034 #ifdef TARGET_NR_recvmsg
6035 case TARGET_NR_recvmsg:
6036 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6037 break;
6038 #endif
6039 #ifdef TARGET_NR_send
6040 case TARGET_NR_send:
6041 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6042 break;
6043 #endif
6044 #ifdef TARGET_NR_sendmsg
6045 case TARGET_NR_sendmsg:
6046 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6047 break;
6048 #endif
6049 #ifdef TARGET_NR_sendto
6050 case TARGET_NR_sendto:
6051 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6052 break;
6053 #endif
6054 #ifdef TARGET_NR_shutdown
6055 case TARGET_NR_shutdown:
6056 ret = get_errno(shutdown(arg1, arg2));
6057 break;
6058 #endif
6059 #ifdef TARGET_NR_socket
6060 case TARGET_NR_socket:
6061 ret = do_socket(arg1, arg2, arg3);
6062 break;
6063 #endif
6064 #ifdef TARGET_NR_socketpair
6065 case TARGET_NR_socketpair:
6066 ret = do_socketpair(arg1, arg2, arg3, arg4);
6067 break;
6068 #endif
6069 #ifdef TARGET_NR_setsockopt
6070 case TARGET_NR_setsockopt:
6071 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6072 break;
6073 #endif
6074
6075 case TARGET_NR_syslog:
6076 if (!(p = lock_user_string(arg2)))
6077 goto efault;
6078 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6079 unlock_user(p, arg2, 0);
6080 break;
6081
6082 case TARGET_NR_setitimer:
6083 {
6084 struct itimerval value, ovalue, *pvalue;
6085
6086 if (arg2) {
6087 pvalue = &value;
6088 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6089 || copy_from_user_timeval(&pvalue->it_value,
6090 arg2 + sizeof(struct target_timeval)))
6091 goto efault;
6092 } else {
6093 pvalue = NULL;
6094 }
6095 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6096 if (!is_error(ret) && arg3) {
6097 if (copy_to_user_timeval(arg3,
6098 &ovalue.it_interval)
6099 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6100 &ovalue.it_value))
6101 goto efault;
6102 }
6103 }
6104 break;
6105 case TARGET_NR_getitimer:
6106 {
6107 struct itimerval value;
6108
6109 ret = get_errno(getitimer(arg1, &value));
6110 if (!is_error(ret) && arg2) {
6111 if (copy_to_user_timeval(arg2,
6112 &value.it_interval)
6113 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6114 &value.it_value))
6115 goto efault;
6116 }
6117 }
6118 break;
6119 case TARGET_NR_stat:
6120 if (!(p = lock_user_string(arg1)))
6121 goto efault;
6122 ret = get_errno(stat(path(p), &st));
6123 unlock_user(p, arg1, 0);
6124 goto do_stat;
6125 case TARGET_NR_lstat:
6126 if (!(p = lock_user_string(arg1)))
6127 goto efault;
6128 ret = get_errno(lstat(path(p), &st));
6129 unlock_user(p, arg1, 0);
6130 goto do_stat;
6131 case TARGET_NR_fstat:
6132 {
6133 ret = get_errno(fstat(arg1, &st));
6134 do_stat:
6135 if (!is_error(ret)) {
6136 struct target_stat *target_st;
6137
6138 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6139 goto efault;
6140 memset(target_st, 0, sizeof(*target_st));
6141 __put_user(st.st_dev, &target_st->st_dev);
6142 __put_user(st.st_ino, &target_st->st_ino);
6143 __put_user(st.st_mode, &target_st->st_mode);
6144 __put_user(st.st_uid, &target_st->st_uid);
6145 __put_user(st.st_gid, &target_st->st_gid);
6146 __put_user(st.st_nlink, &target_st->st_nlink);
6147 __put_user(st.st_rdev, &target_st->st_rdev);
6148 __put_user(st.st_size, &target_st->st_size);
6149 __put_user(st.st_blksize, &target_st->st_blksize);
6150 __put_user(st.st_blocks, &target_st->st_blocks);
6151 __put_user(st.st_atime, &target_st->target_st_atime);
6152 __put_user(st.st_mtime, &target_st->target_st_mtime);
6153 __put_user(st.st_ctime, &target_st->target_st_ctime);
6154 unlock_user_struct(target_st, arg2, 1);
6155 }
6156 }
6157 break;
6158 #ifdef TARGET_NR_olduname
6159 case TARGET_NR_olduname:
6160 goto unimplemented;
6161 #endif
6162 #ifdef TARGET_NR_iopl
6163 case TARGET_NR_iopl:
6164 goto unimplemented;
6165 #endif
6166 case TARGET_NR_vhangup:
6167 ret = get_errno(vhangup());
6168 break;
6169 #ifdef TARGET_NR_idle
6170 case TARGET_NR_idle:
6171 goto unimplemented;
6172 #endif
6173 #ifdef TARGET_NR_syscall
6174 case TARGET_NR_syscall:
6175 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
6176 break;
6177 #endif
6178 case TARGET_NR_wait4:
6179 {
6180 int status;
6181 abi_long status_ptr = arg2;
6182 struct rusage rusage, *rusage_ptr;
6183 abi_ulong target_rusage = arg4;
6184 if (target_rusage)
6185 rusage_ptr = &rusage;
6186 else
6187 rusage_ptr = NULL;
6188 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6189 if (!is_error(ret)) {
6190 if (status_ptr) {
6191 status = host_to_target_waitstatus(status);
6192 if (put_user_s32(status, status_ptr))
6193 goto efault;
6194 }
6195 if (target_rusage)
6196 host_to_target_rusage(target_rusage, &rusage);
6197 }
6198 }
6199 break;
6200 #ifdef TARGET_NR_swapoff
6201 case TARGET_NR_swapoff:
6202 if (!(p = lock_user_string(arg1)))
6203 goto efault;
6204 ret = get_errno(swapoff(p));
6205 unlock_user(p, arg1, 0);
6206 break;
6207 #endif
6208 case TARGET_NR_sysinfo:
6209 {
6210 struct target_sysinfo *target_value;
6211 struct sysinfo value;
6212 ret = get_errno(sysinfo(&value));
6213 if (!is_error(ret) && arg1)
6214 {
6215 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6216 goto efault;
6217 __put_user(value.uptime, &target_value->uptime);
6218 __put_user(value.loads[0], &target_value->loads[0]);
6219 __put_user(value.loads[1], &target_value->loads[1]);
6220 __put_user(value.loads[2], &target_value->loads[2]);
6221 __put_user(value.totalram, &target_value->totalram);
6222 __put_user(value.freeram, &target_value->freeram);
6223 __put_user(value.sharedram, &target_value->sharedram);
6224 __put_user(value.bufferram, &target_value->bufferram);
6225 __put_user(value.totalswap, &target_value->totalswap);
6226 __put_user(value.freeswap, &target_value->freeswap);
6227 __put_user(value.procs, &target_value->procs);
6228 __put_user(value.totalhigh, &target_value->totalhigh);
6229 __put_user(value.freehigh, &target_value->freehigh);
6230 __put_user(value.mem_unit, &target_value->mem_unit);
6231 unlock_user_struct(target_value, arg1, 1);
6232 }
6233 }
6234 break;
6235 #ifdef TARGET_NR_ipc
6236 case TARGET_NR_ipc:
6237 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6238 break;
6239 #endif
6240 #ifdef TARGET_NR_semget
6241 case TARGET_NR_semget:
6242 ret = get_errno(semget(arg1, arg2, arg3));
6243 break;
6244 #endif
6245 #ifdef TARGET_NR_semop
6246 case TARGET_NR_semop:
6247 ret = get_errno(do_semop(arg1, arg2, arg3));
6248 break;
6249 #endif
6250 #ifdef TARGET_NR_semctl
6251 case TARGET_NR_semctl:
6252 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6253 break;
6254 #endif
6255 #ifdef TARGET_NR_msgctl
6256 case TARGET_NR_msgctl:
6257 ret = do_msgctl(arg1, arg2, arg3);
6258 break;
6259 #endif
6260 #ifdef TARGET_NR_msgget
6261 case TARGET_NR_msgget:
6262 ret = get_errno(msgget(arg1, arg2));
6263 break;
6264 #endif
6265 #ifdef TARGET_NR_msgrcv
6266 case TARGET_NR_msgrcv:
6267 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6268 break;
6269 #endif
6270 #ifdef TARGET_NR_msgsnd
6271 case TARGET_NR_msgsnd:
6272 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6273 break;
6274 #endif
6275 #ifdef TARGET_NR_shmget
6276 case TARGET_NR_shmget:
6277 ret = get_errno(shmget(arg1, arg2, arg3));
6278 break;
6279 #endif
6280 #ifdef TARGET_NR_shmctl
6281 case TARGET_NR_shmctl:
6282 ret = do_shmctl(arg1, arg2, arg3);
6283 break;
6284 #endif
6285 #ifdef TARGET_NR_shmat
6286 case TARGET_NR_shmat:
6287 ret = do_shmat(arg1, arg2, arg3);
6288 break;
6289 #endif
6290 #ifdef TARGET_NR_shmdt
6291 case TARGET_NR_shmdt:
6292 ret = do_shmdt(arg1);
6293 break;
6294 #endif
6295 case TARGET_NR_fsync:
6296 ret = get_errno(fsync(arg1));
6297 break;
6298 case TARGET_NR_clone:
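/* The kernel's clone() argument order (ptid/tls/ctid) differs between
   architectures, hence the per-target shuffling below. */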
6299 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6300 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6301 #elif defined(TARGET_CRIS)
6302 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6303 #elif defined(TARGET_S390X)
6304 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6305 #else
6306 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6307 #endif
6308 break;
6309 #ifdef __NR_exit_group
6310 /* new thread calls */
6311 case TARGET_NR_exit_group:
6312 #ifdef TARGET_GPROF
6313 _mcleanup();
6314 #endif
6315 gdb_exit(cpu_env, arg1);
6316 ret = get_errno(exit_group(arg1));
6317 break;
6318 #endif
6319 case TARGET_NR_setdomainname:
6320 if (!(p = lock_user_string(arg1)))
6321 goto efault;
6322 ret = get_errno(setdomainname(p, arg2));
6323 unlock_user(p, arg1, 0);
6324 break;
6325 case TARGET_NR_uname:
6326 /* no need to transcode because we use the linux syscall */
6327 {
6328 struct new_utsname * buf;
6329
6330 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6331 goto efault;
6332 ret = get_errno(sys_uname(buf));
6333 if (!is_error(ret)) {
6334 /* Overwrite the native machine name with whatever is being
6335 emulated. */
6336 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6337 /* Allow the user to override the reported release. */
6338 if (qemu_uname_release && *qemu_uname_release)
6339 strcpy (buf->release, qemu_uname_release);
6340 }
6341 unlock_user_struct(buf, arg1, 1);
6342 }
6343 break;
6344 #ifdef TARGET_I386
6345 case TARGET_NR_modify_ldt:
6346 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6347 break;
6348 #if !defined(TARGET_X86_64)
6349 case TARGET_NR_vm86old:
6350 goto unimplemented;
6351 case TARGET_NR_vm86:
6352 ret = do_vm86(cpu_env, arg1, arg2);
6353 break;
6354 #endif
6355 #endif
6356 case TARGET_NR_adjtimex:
6357 goto unimplemented;
6358 #ifdef TARGET_NR_create_module
6359 case TARGET_NR_create_module:
6360 #endif
6361 case TARGET_NR_init_module:
6362 case TARGET_NR_delete_module:
6363 #ifdef TARGET_NR_get_kernel_syms
6364 case TARGET_NR_get_kernel_syms:
6365 #endif
6366 goto unimplemented;
6367 case TARGET_NR_quotactl:
6368 goto unimplemented;
6369 case TARGET_NR_getpgid:
6370 ret = get_errno(getpgid(arg1));
6371 break;
6372 case TARGET_NR_fchdir:
6373 ret = get_errno(fchdir(arg1));
6374 break;
6375 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6376 case TARGET_NR_bdflush:
6377 goto unimplemented;
6378 #endif
6379 #ifdef TARGET_NR_sysfs
6380 case TARGET_NR_sysfs:
6381 goto unimplemented;
6382 #endif
6383 case TARGET_NR_personality:
6384 ret = get_errno(personality(arg1));
6385 break;
6386 #ifdef TARGET_NR_afs_syscall
6387 case TARGET_NR_afs_syscall:
6388 goto unimplemented;
6389 #endif
6390 #ifdef TARGET_NR__llseek /* Not on alpha */
6391 case TARGET_NR__llseek:
6392 {
6393 int64_t res;
6394 #if !defined(__NR_llseek)
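/* No host llseek: rebuild the 64-bit offset from its high (arg2) and
   low (arg3) halves and fall back to lseek. */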
6395 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6396 if (res == -1) {
6397 ret = get_errno(res);
6398 } else {
6399 ret = 0;
6400 }
6401 #else
6402 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6403 #endif
6404 if ((ret == 0) && put_user_s64(res, arg4)) {
6405 goto efault;
6406 }
6407 }
6408 break;
6409 #endif
6410 case TARGET_NR_getdents:
6411 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
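/* 32-bit guest on a 64-bit host: each host linux_dirent must be repacked
   into the smaller target_dirent layout. */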
6412 {
6413 struct target_dirent *target_dirp;
6414 struct linux_dirent *dirp;
6415 abi_long count = arg3;
6416
6417 dirp = malloc(count);
6418 if (!dirp) {
6419 ret = -TARGET_ENOMEM;
6420 goto fail;
6421 }
6422
6423 ret = get_errno(sys_getdents(arg1, dirp, count));
6424 if (!is_error(ret)) {
6425 struct linux_dirent *de;
6426 struct target_dirent *tde;
6427 int len = ret;
6428 int reclen, treclen;
6429 int count1, tnamelen;
6430
6431 count1 = 0;
6432 de = dirp;
6433 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6434 goto efault;
6435 tde = target_dirp;
6436 while (len > 0) {
6437 reclen = de->d_reclen;
6438 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
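/* Each record shrinks because d_ino and d_off narrow from host long to
   abi_long. */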
6439 tde->d_reclen = tswap16(treclen);
6440 tde->d_ino = tswapl(de->d_ino);
6441 tde->d_off = tswapl(de->d_off);
6442 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6443 if (tnamelen > 256)
6444 tnamelen = 256;
6445 /* XXX: may not be correct */
6446 pstrcpy(tde->d_name, tnamelen, de->d_name);
6447 de = (struct linux_dirent *)((char *)de + reclen);
6448 len -= reclen;
6449 tde = (struct target_dirent *)((char *)tde + treclen);
6450 count1 += treclen;
6451 }
6452 ret = count1;
6453 unlock_user(target_dirp, arg2, ret);
6454 }
6455 free(dirp);
6456 }
6457 #else
6458 {
6459 struct linux_dirent *dirp;
6460 abi_long count = arg3;
6461
6462 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6463 goto efault;
6464 ret = get_errno(sys_getdents(arg1, dirp, count));
6465 if (!is_error(ret)) {
6466 struct linux_dirent *de;
6467 int len = ret;
6468 int reclen;
6469 de = dirp;
6470 while (len > 0) {
6471 reclen = de->d_reclen;
6472 if (reclen > len)
6473 break;
6474 de->d_reclen = tswap16(reclen);
6475 tswapls(&de->d_ino);
6476 tswapls(&de->d_off);
6477 de = (struct linux_dirent *)((char *)de + reclen);
6478 len -= reclen;
6479 }
6480 }
6481 unlock_user(dirp, arg2, ret);
6482 }
6483 #endif
6484 break;
6485 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6486 case TARGET_NR_getdents64:
6487 {
6488 struct linux_dirent64 *dirp;
6489 abi_long count = arg3;
6490 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6491 goto efault;
6492 ret = get_errno(sys_getdents64(arg1, dirp, count));
6493 if (!is_error(ret)) {
6494 struct linux_dirent64 *de;
6495 int len = ret;
6496 int reclen;
6497 de = dirp;
6498 while (len > 0) {
6499 reclen = de->d_reclen;
6500 if (reclen > len)
6501 break;
6502 de->d_reclen = tswap16(reclen);
6503 tswap64s((uint64_t *)&de->d_ino);
6504 tswap64s((uint64_t *)&de->d_off);
6505 de = (struct linux_dirent64 *)((char *)de + reclen);
6506 len -= reclen;
6507 }
6508 }
6509 unlock_user(dirp, arg2, ret);
6510 }
6511 break;
6512 #endif /* TARGET_NR_getdents64 */
6513 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6514 #ifdef TARGET_S390X
6515 case TARGET_NR_select:
6516 #else
6517 case TARGET_NR__newselect:
6518 #endif
6519 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6520 break;
6521 #endif
6522 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6523 # ifdef TARGET_NR_poll
6524 case TARGET_NR_poll:
6525 # endif
6526 # ifdef TARGET_NR_ppoll
6527 case TARGET_NR_ppoll:
6528 # endif
6529 {
6530 struct target_pollfd *target_pfd;
6531 unsigned int nfds = arg2;
6532 int timeout = arg3;
6533 struct pollfd *pfd;
6534 unsigned int i;
6535
6536 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6537 if (!target_pfd)
6538 goto efault;
6539
6540 pfd = alloca(sizeof(struct pollfd) * nfds);
6541 for(i = 0; i < nfds; i++) {
6542 pfd[i].fd = tswap32(target_pfd[i].fd);
6543 pfd[i].events = tswap16(target_pfd[i].events);
6544 }
6545
6546 # ifdef TARGET_NR_ppoll
6547 if (num == TARGET_NR_ppoll) {
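/* ppoll additionally takes a timespec timeout and a signal mask that is
   installed for the duration of the call; convert both from the guest. */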
6548 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6549 target_sigset_t *target_set;
6550 sigset_t _set, *set = &_set;
6551
6552 if (arg3) {
6553 if (target_to_host_timespec(timeout_ts, arg3)) {
6554 unlock_user(target_pfd, arg1, 0);
6555 goto efault;
6556 }
6557 } else {
6558 timeout_ts = NULL;
6559 }
6560
6561 if (arg4) {
6562 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6563 if (!target_set) {
6564 unlock_user(target_pfd, arg1, 0);
6565 goto efault;
6566 }
6567 target_to_host_sigset(set, target_set);
6568 } else {
6569 set = NULL;
6570 }
6571
6572 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6573
6574 if (!is_error(ret) && arg3) {
6575 host_to_target_timespec(arg3, timeout_ts);
6576 }
6577 if (arg4) {
6578 unlock_user(target_set, arg4, 0);
6579 }
6580 } else
6581 # endif
6582 ret = get_errno(poll(pfd, nfds, timeout));
6583
6584 if (!is_error(ret)) {
6585 for(i = 0; i < nfds; i++) {
6586 target_pfd[i].revents = tswap16(pfd[i].revents);
6587 }
6588 }
6589 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6590 }
6591 break;
6592 #endif
6593 case TARGET_NR_flock:
6594 /* NOTE: the flock constant seems to be the same for every
6595 Linux platform */
6596 ret = get_errno(flock(arg1, arg2));
6597 break;
6598 case TARGET_NR_readv:
6599 {
6600 int count = arg3;
6601 struct iovec *vec;
6602
6603 vec = alloca(count * sizeof(struct iovec));
6604 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6605 goto efault;
6606 ret = get_errno(readv(arg1, vec, count));
6607 unlock_iovec(vec, arg2, count, 1);
6608 }
6609 break;
6610 case TARGET_NR_writev:
6611 {
6612 int count = arg3;
6613 struct iovec *vec;
6614
6615 vec = alloca(count * sizeof(struct iovec));
6616 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6617 goto efault;
6618 ret = get_errno(writev(arg1, vec, count));
6619 unlock_iovec(vec, arg2, count, 0);
6620 }
6621 break;
6622 case TARGET_NR_getsid:
6623 ret = get_errno(getsid(arg1));
6624 break;
6625 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6626 case TARGET_NR_fdatasync:
6627 ret = get_errno(fdatasync(arg1));
6628 break;
6629 #endif
6630 case TARGET_NR__sysctl:
6631 /* We don't implement this, but ENOTDIR is always a safe
6632 return value. */
6633 ret = -TARGET_ENOTDIR;
6634 break;
6635 case TARGET_NR_sched_getaffinity:
6636 {
6637 unsigned int mask_size;
6638 unsigned long *mask;
6639
6640 /*
6641 * sched_getaffinity needs multiples of ulong, so we need to take
6642 * care of mismatches between target ulong and host ulong sizes.
6643 */
6644 if (arg2 & (sizeof(abi_ulong) - 1)) {
6645 ret = -TARGET_EINVAL;
6646 break;
6647 }
6648 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
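/* Round the requested size up to a whole number of host unsigned longs. */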
6649
6650 mask = alloca(mask_size);
6651 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6652
6653 if (!is_error(ret)) {
6654 if (copy_to_user(arg3, mask, ret)) {
6655 goto efault;
6656 }
6657 }
6658 }
6659 break;
6660 case TARGET_NR_sched_setaffinity:
6661 {
6662 unsigned int mask_size;
6663 unsigned long *mask;
6664
6665 /*
6666 * sched_setaffinity needs multiples of ulong, so we need to take
6667 * care of mismatches between target ulong and host ulong sizes.
6668 */
6669 if (arg2 & (sizeof(abi_ulong) - 1)) {
6670 ret = -TARGET_EINVAL;
6671 break;
6672 }
6673 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6674
6675 mask = alloca(mask_size);
6676 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6677 goto efault;
6678 }
6679 memcpy(mask, p, arg2);
6680 unlock_user_struct(p, arg3, 0);
6681
6682 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6683 }
6684 break;
6685 case TARGET_NR_sched_setparam:
6686 {
6687 struct sched_param *target_schp;
6688 struct sched_param schp;
6689
6690 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6691 goto efault;
6692 schp.sched_priority = tswap32(target_schp->sched_priority);
6693 unlock_user_struct(target_schp, arg2, 0);
6694 ret = get_errno(sched_setparam(arg1, &schp));
6695 }
6696 break;
6697 case TARGET_NR_sched_getparam:
6698 {
6699 struct sched_param *target_schp;
6700 struct sched_param schp;
6701 ret = get_errno(sched_getparam(arg1, &schp));
6702 if (!is_error(ret)) {
6703 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6704 goto efault;
6705 target_schp->sched_priority = tswap32(schp.sched_priority);
6706 unlock_user_struct(target_schp, arg2, 1);
6707 }
6708 }
6709 break;
6710 case TARGET_NR_sched_setscheduler:
6711 {
6712 struct sched_param *target_schp;
6713 struct sched_param schp;
6714 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6715 goto efault;
6716 schp.sched_priority = tswap32(target_schp->sched_priority);
6717 unlock_user_struct(target_schp, arg3, 0);
6718 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6719 }
6720 break;
6721 case TARGET_NR_sched_getscheduler:
6722 ret = get_errno(sched_getscheduler(arg1));
6723 break;
6724 case TARGET_NR_sched_yield:
6725 ret = get_errno(sched_yield());
6726 break;
6727 case TARGET_NR_sched_get_priority_max:
6728 ret = get_errno(sched_get_priority_max(arg1));
6729 break;
6730 case TARGET_NR_sched_get_priority_min:
6731 ret = get_errno(sched_get_priority_min(arg1));
6732 break;
6733 case TARGET_NR_sched_rr_get_interval:
6734 {
6735 struct timespec ts;
6736 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6737 if (!is_error(ret)) {
6738 host_to_target_timespec(arg2, &ts);
6739 }
6740 }
6741 break;
6742 case TARGET_NR_nanosleep:
6743 {
6744 struct timespec req, rem;
6745 target_to_host_timespec(&req, arg1);
6746 ret = get_errno(nanosleep(&req, &rem));
6747 if (is_error(ret) && arg2) {
6748 host_to_target_timespec(arg2, &rem);
6749 }
6750 }
6751 break;
6752 #ifdef TARGET_NR_query_module
6753 case TARGET_NR_query_module:
6754 goto unimplemented;
6755 #endif
6756 #ifdef TARGET_NR_nfsservctl
6757 case TARGET_NR_nfsservctl:
6758 goto unimplemented;
6759 #endif
6760 case TARGET_NR_prctl:
6761 switch (arg1)
6762 {
6763 case PR_GET_PDEATHSIG:
6764 {
6765 int deathsig;
6766 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6767 if (!is_error(ret) && arg2
6768 && put_user_ual(deathsig, arg2))
6769 goto efault;
6770 }
6771 break;
6772 default:
6773 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6774 break;
6775 }
6776 break;
6777 #ifdef TARGET_NR_arch_prctl
6778 case TARGET_NR_arch_prctl:
6779 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6780 ret = do_arch_prctl(cpu_env, arg1, arg2);
6781 break;
6782 #else
6783 goto unimplemented;
6784 #endif
6785 #endif
6786 #ifdef TARGET_NR_pread
6787 case TARGET_NR_pread:
6788 #ifdef TARGET_ARM
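/* ARM EABI aligns the 64-bit offset to an even register pair, leaving a
   padding word in arg4, so the offset is taken from arg5. */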
6789 if (((CPUARMState *)cpu_env)->eabi)
6790 arg4 = arg5;
6791 #endif
6792 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6793 goto efault;
6794 ret = get_errno(pread(arg1, p, arg3, arg4));
6795 unlock_user(p, arg2, ret);
6796 break;
6797 case TARGET_NR_pwrite:
6798 #ifdef TARGET_ARM
6799 if (((CPUARMState *)cpu_env)->eabi)
6800 arg4 = arg5;
6801 #endif
6802 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6803 goto efault;
6804 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6805 unlock_user(p, arg2, 0);
6806 break;
6807 #endif
6808 #ifdef TARGET_NR_pread64
6809 case TARGET_NR_pread64:
6810 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6811 goto efault;
6812 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6813 unlock_user(p, arg2, ret);
6814 break;
6815 case TARGET_NR_pwrite64:
6816 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6817 goto efault;
6818 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6819 unlock_user(p, arg2, 0);
6820 break;
6821 #endif
6822 case TARGET_NR_getcwd:
6823 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6824 goto efault;
6825 ret = get_errno(sys_getcwd1(p, arg2));
6826 unlock_user(p, arg1, ret);
6827 break;
6828 case TARGET_NR_capget:
6829 goto unimplemented;
6830 case TARGET_NR_capset:
6831 goto unimplemented;
6832 case TARGET_NR_sigaltstack:
6833 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6834 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6835 defined(TARGET_M68K) || defined(TARGET_S390X)
6836 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6837 break;
6838 #else
6839 goto unimplemented;
6840 #endif
6841 case TARGET_NR_sendfile:
6842 goto unimplemented;
6843 #ifdef TARGET_NR_getpmsg
6844 case TARGET_NR_getpmsg:
6845 goto unimplemented;
6846 #endif
6847 #ifdef TARGET_NR_putpmsg
6848 case TARGET_NR_putpmsg:
6849 goto unimplemented;
6850 #endif
6851 #ifdef TARGET_NR_vfork
6852 case TARGET_NR_vfork:
6853 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6854 0, 0, 0, 0));
6855 break;
6856 #endif
6857 #ifdef TARGET_NR_ugetrlimit
6858 case TARGET_NR_ugetrlimit:
6859 {
6860 struct rlimit rlim;
6861 ret = get_errno(getrlimit(arg1, &rlim));
6862 if (!is_error(ret)) {
6863 struct target_rlimit *target_rlim;
6864 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6865 goto efault;
6866 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6867 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6868 unlock_user_struct(target_rlim, arg2, 1);
6869 }
6870 break;
6871 }
6872 #endif
6873 #ifdef TARGET_NR_truncate64
6874 case TARGET_NR_truncate64:
6875 if (!(p = lock_user_string(arg1)))
6876 goto efault;
6877 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6878 unlock_user(p, arg1, 0);
6879 break;
6880 #endif
6881 #ifdef TARGET_NR_ftruncate64
6882 case TARGET_NR_ftruncate64:
6883 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6884 break;
6885 #endif
6886 #ifdef TARGET_NR_stat64
6887 case TARGET_NR_stat64:
6888 if (!(p = lock_user_string(arg1)))
6889 goto efault;
6890 ret = get_errno(stat(path(p), &st));
6891 unlock_user(p, arg1, 0);
6892 if (!is_error(ret))
6893 ret = host_to_target_stat64(cpu_env, arg2, &st);
6894 break;
6895 #endif
6896 #ifdef TARGET_NR_lstat64
6897 case TARGET_NR_lstat64:
6898 if (!(p = lock_user_string(arg1)))
6899 goto efault;
6900 ret = get_errno(lstat(path(p), &st));
6901 unlock_user(p, arg1, 0);
6902 if (!is_error(ret))
6903 ret = host_to_target_stat64(cpu_env, arg2, &st);
6904 break;
6905 #endif
6906 #ifdef TARGET_NR_fstat64
6907 case TARGET_NR_fstat64:
6908 ret = get_errno(fstat(arg1, &st));
6909 if (!is_error(ret))
6910 ret = host_to_target_stat64(cpu_env, arg2, &st);
6911 break;
6912 #endif
6913 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6914 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6915 #ifdef TARGET_NR_fstatat64
6916 case TARGET_NR_fstatat64:
6917 #endif
6918 #ifdef TARGET_NR_newfstatat
6919 case TARGET_NR_newfstatat:
6920 #endif
6921 if (!(p = lock_user_string(arg2)))
6922 goto efault;
6923 #ifdef __NR_fstatat64
6924 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6925 #else
6926 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6927 #endif
6928 if (!is_error(ret))
6929 ret = host_to_target_stat64(cpu_env, arg3, &st);
6930 break;
6931 #endif
6932 case TARGET_NR_lchown:
6933 if (!(p = lock_user_string(arg1)))
6934 goto efault;
6935 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6936 unlock_user(p, arg1, 0);
6937 break;
6938 #ifdef TARGET_NR_getuid
6939 case TARGET_NR_getuid:
6940 ret = get_errno(high2lowuid(getuid()));
6941 break;
6942 #endif
6943 #ifdef TARGET_NR_getgid
6944 case TARGET_NR_getgid:
6945 ret = get_errno(high2lowgid(getgid()));
6946 break;
6947 #endif
6948 #ifdef TARGET_NR_geteuid
6949 case TARGET_NR_geteuid:
6950 ret = get_errno(high2lowuid(geteuid()));
6951 break;
6952 #endif
6953 #ifdef TARGET_NR_getegid
6954 case TARGET_NR_getegid:
6955 ret = get_errno(high2lowgid(getegid()));
6956 break;
6957 #endif
6958 case TARGET_NR_setreuid:
6959 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6960 break;
6961 case TARGET_NR_setregid:
6962 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6963 break;
6964 case TARGET_NR_getgroups:
6965 {
6966 int gidsetsize = arg1;
6967 target_id *target_grouplist;
6968 gid_t *grouplist;
6969 int i;
6970
6971 grouplist = alloca(gidsetsize * sizeof(gid_t));
6972 ret = get_errno(getgroups(gidsetsize, grouplist));
6973 if (gidsetsize == 0)
6974 break;
6975 if (!is_error(ret)) {
6976 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6977 if (!target_grouplist)
6978 goto efault;
6979 for(i = 0;i < ret; i++)
6980 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
6981 unlock_user(target_grouplist, arg2, gidsetsize * 2);
6982 }
6983 }
6984 break;
6985 case TARGET_NR_setgroups:
6986 {
6987 int gidsetsize = arg1;
6988 target_id *target_grouplist;
6989 gid_t *grouplist;
6990 int i;
6991
6992 grouplist = alloca(gidsetsize * sizeof(gid_t));
6993 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
6994 if (!target_grouplist) {
6995 ret = -TARGET_EFAULT;
6996 goto fail;
6997 }
6998 for(i = 0;i < gidsetsize; i++)
6999 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7000 unlock_user(target_grouplist, arg2, 0);
7001 ret = get_errno(setgroups(gidsetsize, grouplist));
7002 }
7003 break;
7004 case TARGET_NR_fchown:
7005 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7006 break;
7007 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7008 case TARGET_NR_fchownat:
7009 if (!(p = lock_user_string(arg2)))
7010 goto efault;
7011 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7012 unlock_user(p, arg2, 0);
7013 break;
7014 #endif
7015 #ifdef TARGET_NR_setresuid
7016 case TARGET_NR_setresuid:
7017 ret = get_errno(setresuid(low2highuid(arg1),
7018 low2highuid(arg2),
7019 low2highuid(arg3)));
7020 break;
7021 #endif
7022 #ifdef TARGET_NR_getresuid
7023 case TARGET_NR_getresuid:
7024 {
7025 uid_t ruid, euid, suid;
7026 ret = get_errno(getresuid(&ruid, &euid, &suid));
7027 if (!is_error(ret)) {
7028 if (put_user_u16(high2lowuid(ruid), arg1)
7029 || put_user_u16(high2lowuid(euid), arg2)
7030 || put_user_u16(high2lowuid(suid), arg3))
7031 goto efault;
7032 }
7033 }
7034 break;
7035 #endif
7036 #ifdef TARGET_NR_setresgid
7037 case TARGET_NR_setresgid:
7038 ret = get_errno(setresgid(low2highgid(arg1),
7039 low2highgid(arg2),
7040 low2highgid(arg3)));
7041 break;
7042 #endif
7043 #ifdef TARGET_NR_getresgid
7044 case TARGET_NR_getresgid:
7045 {
7046 gid_t rgid, egid, sgid;
7047 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7048 if (!is_error(ret)) {
7049 if (put_user_u16(high2lowgid(rgid), arg1)
7050 || put_user_u16(high2lowgid(egid), arg2)
7051 || put_user_u16(high2lowgid(sgid), arg3))
7052 goto efault;
7053 }
7054 }
7055 break;
7056 #endif
7057 case TARGET_NR_chown:
7058 if (!(p = lock_user_string(arg1)))
7059 goto efault;
7060 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7061 unlock_user(p, arg1, 0);
7062 break;
7063 case TARGET_NR_setuid:
7064 ret = get_errno(setuid(low2highuid(arg1)));
7065 break;
7066 case TARGET_NR_setgid:
7067 ret = get_errno(setgid(low2highgid(arg1)));
7068 break;
7069 case TARGET_NR_setfsuid:
7070 ret = get_errno(setfsuid(arg1));
7071 break;
7072 case TARGET_NR_setfsgid:
7073 ret = get_errno(setfsgid(arg1));
7074 break;
7075
7076 #ifdef TARGET_NR_lchown32
7077 case TARGET_NR_lchown32:
7078 if (!(p = lock_user_string(arg1)))
7079 goto efault;
7080 ret = get_errno(lchown(p, arg2, arg3));
7081 unlock_user(p, arg1, 0);
7082 break;
7083 #endif
7084 #ifdef TARGET_NR_getuid32
7085 case TARGET_NR_getuid32:
7086 ret = get_errno(getuid());
7087 break;
7088 #endif
7089
7090 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7091 /* Alpha specific */
7092 case TARGET_NR_getxuid:
7093 {
7094 uid_t euid;
7095 euid=geteuid();
7096 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7097 }
7098 ret = get_errno(getuid());
7099 break;
7100 #endif
7101 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7102 /* Alpha specific */
7103 case TARGET_NR_getxgid:
7104 {
7105 gid_t egid;
7106 egid=getegid();
7107 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7108 }
7109 ret = get_errno(getgid());
7110 break;
7111 #endif
7112 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7113 /* Alpha specific */
7114 case TARGET_NR_osf_getsysinfo:
7115 ret = -TARGET_EOPNOTSUPP;
7116 switch (arg1) {
7117 case TARGET_GSI_IEEE_FP_CONTROL:
7118 {
7119 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7120
7121 /* Copied from linux ieee_fpcr_to_swcr. */
7122 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7123 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7124 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7125 | SWCR_TRAP_ENABLE_DZE
7126 | SWCR_TRAP_ENABLE_OVF);
7127 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7128 | SWCR_TRAP_ENABLE_INE);
7129 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7130 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7131
7132 if (put_user_u64 (swcr, arg2))
7133 goto efault;
7134 ret = 0;
7135 }
7136 break;
7137
7138 /* case GSI_IEEE_STATE_AT_SIGNAL:
7139 -- Not implemented in linux kernel.
7140 case GSI_UACPROC:
7141 -- Retrieves current unaligned access state; not much used.
7142 case GSI_PROC_TYPE:
7143 -- Retrieves implver information; surely not used.
7144 case GSI_GET_HWRPB:
7145 -- Grabs a copy of the HWRPB; surely not used.
7146 */
7147 }
7148 break;
7149 #endif
7150 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7151 /* Alpha specific */
7152 case TARGET_NR_osf_setsysinfo:
7153 ret = -TARGET_EOPNOTSUPP;
7154 switch (arg1) {
7155 case TARGET_SSI_IEEE_FP_CONTROL:
7156 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7157 {
7158 uint64_t swcr, fpcr, orig_fpcr;
7159
7160 if (get_user_u64 (swcr, arg2))
7161 goto efault;
7162 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7163 fpcr = orig_fpcr & FPCR_DYN_MASK;
7164
7165 /* Copied from linux ieee_swcr_to_fpcr. */
7166 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7167 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7168 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7169 | SWCR_TRAP_ENABLE_DZE
7170 | SWCR_TRAP_ENABLE_OVF)) << 48;
7171 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7172 | SWCR_TRAP_ENABLE_INE)) << 57;
7173 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7174 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7175
7176 cpu_alpha_store_fpcr (cpu_env, fpcr);
7177 ret = 0;
7178
7179 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7180 /* Old exceptions are not signaled. */
7181 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7182
7183 /* If any exceptions are set by this call and are unmasked,
7184 send a signal. */
7185 /* ??? FIXME */
7186 }
7187 }
7188 break;
7189
7190 /* case SSI_NVPAIRS:
7191 -- Used with SSIN_UACPROC to enable unaligned accesses.
7192 case SSI_IEEE_STATE_AT_SIGNAL:
7193 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7194 -- Not implemented in linux kernel
7195 */
7196 }
7197 break;
7198 #endif
7199 #ifdef TARGET_NR_osf_sigprocmask
7200 /* Alpha specific. */
7201 case TARGET_NR_osf_sigprocmask:
7202 {
7203 abi_ulong mask;
7204 int how = arg1;
7205 sigset_t set, oldset;
7206
7207 switch(arg1) {
7208 case TARGET_SIG_BLOCK:
7209 how = SIG_BLOCK;
7210 break;
7211 case TARGET_SIG_UNBLOCK:
7212 how = SIG_UNBLOCK;
7213 break;
7214 case TARGET_SIG_SETMASK:
7215 how = SIG_SETMASK;
7216 break;
7217 default:
7218 ret = -TARGET_EINVAL;
7219 goto fail;
7220 }
7221 mask = arg2;
7222 target_to_host_old_sigset(&set, &mask);
7223 sigprocmask(how, &set, &oldset);
7224 host_to_target_old_sigset(&mask, &oldset);
7225 ret = mask;
7226 }
7227 break;
7228 #endif
7229
7230 #ifdef TARGET_NR_getgid32
7231 case TARGET_NR_getgid32:
7232 ret = get_errno(getgid());
7233 break;
7234 #endif
7235 #ifdef TARGET_NR_geteuid32
7236 case TARGET_NR_geteuid32:
7237 ret = get_errno(geteuid());
7238 break;
7239 #endif
7240 #ifdef TARGET_NR_getegid32
7241 case TARGET_NR_getegid32:
7242 ret = get_errno(getegid());
7243 break;
7244 #endif
7245 #ifdef TARGET_NR_setreuid32
7246 case TARGET_NR_setreuid32:
7247 ret = get_errno(setreuid(arg1, arg2));
7248 break;
7249 #endif
7250 #ifdef TARGET_NR_setregid32
7251 case TARGET_NR_setregid32:
7252 ret = get_errno(setregid(arg1, arg2));
7253 break;
7254 #endif
7255 #ifdef TARGET_NR_getgroups32
7256 case TARGET_NR_getgroups32:
7257 {
7258 int gidsetsize = arg1;
7259 uint32_t *target_grouplist;
7260 gid_t *grouplist;
7261 int i;
7262
7263 grouplist = alloca(gidsetsize * sizeof(gid_t));
7264 ret = get_errno(getgroups(gidsetsize, grouplist));
7265 if (gidsetsize == 0)
7266 break;
7267 if (!is_error(ret)) {
7268 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7269 if (!target_grouplist) {
7270 ret = -TARGET_EFAULT;
7271 goto fail;
7272 }
7273 for(i = 0;i < ret; i++)
7274 target_grouplist[i] = tswap32(grouplist[i]);
7275 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7276 }
7277 }
7278 break;
7279 #endif
7280 #ifdef TARGET_NR_setgroups32
7281 case TARGET_NR_setgroups32:
7282 {
7283 int gidsetsize = arg1;
7284 uint32_t *target_grouplist;
7285 gid_t *grouplist;
7286 int i;
7287
7288 grouplist = alloca(gidsetsize * sizeof(gid_t));
7289 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7290 if (!target_grouplist) {
7291 ret = -TARGET_EFAULT;
7292 goto fail;
7293 }
7294 for(i = 0;i < gidsetsize; i++)
7295 grouplist[i] = tswap32(target_grouplist[i]);
7296 unlock_user(target_grouplist, arg2, 0);
7297 ret = get_errno(setgroups(gidsetsize, grouplist));
7298 }
7299 break;
7300 #endif
7301 #ifdef TARGET_NR_fchown32
7302 case TARGET_NR_fchown32:
7303 ret = get_errno(fchown(arg1, arg2, arg3));
7304 break;
7305 #endif
7306 #ifdef TARGET_NR_setresuid32
7307 case TARGET_NR_setresuid32:
7308 ret = get_errno(setresuid(arg1, arg2, arg3));
7309 break;
7310 #endif
7311 #ifdef TARGET_NR_getresuid32
7312 case TARGET_NR_getresuid32:
7313 {
7314 uid_t ruid, euid, suid;
7315 ret = get_errno(getresuid(&ruid, &euid, &suid));
7316 if (!is_error(ret)) {
7317 if (put_user_u32(ruid, arg1)
7318 || put_user_u32(euid, arg2)
7319 || put_user_u32(suid, arg3))
7320 goto efault;
7321 }
7322 }
7323 break;
7324 #endif
7325 #ifdef TARGET_NR_setresgid32
7326 case TARGET_NR_setresgid32:
7327 ret = get_errno(setresgid(arg1, arg2, arg3));
7328 break;
7329 #endif
7330 #ifdef TARGET_NR_getresgid32
7331 case TARGET_NR_getresgid32:
7332 {
7333 gid_t rgid, egid, sgid;
7334 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7335 if (!is_error(ret)) {
7336 if (put_user_u32(rgid, arg1)
7337 || put_user_u32(egid, arg2)
7338 || put_user_u32(sgid, arg3))
7339 goto efault;
7340 }
7341 }
7342 break;
7343 #endif
7344 #ifdef TARGET_NR_chown32
7345 case TARGET_NR_chown32:
7346 if (!(p = lock_user_string(arg1)))
7347 goto efault;
7348 ret = get_errno(chown(p, arg2, arg3));
7349 unlock_user(p, arg1, 0);
7350 break;
7351 #endif
7352 #ifdef TARGET_NR_setuid32
7353 case TARGET_NR_setuid32:
7354 ret = get_errno(setuid(arg1));
7355 break;
7356 #endif
7357 #ifdef TARGET_NR_setgid32
7358 case TARGET_NR_setgid32:
7359 ret = get_errno(setgid(arg1));
7360 break;
7361 #endif
7362 #ifdef TARGET_NR_setfsuid32
7363 case TARGET_NR_setfsuid32:
7364 ret = get_errno(setfsuid(arg1));
7365 break;
7366 #endif
7367 #ifdef TARGET_NR_setfsgid32
7368 case TARGET_NR_setfsgid32:
7369 ret = get_errno(setfsgid(arg1));
7370 break;
7371 #endif
7372
7373 case TARGET_NR_pivot_root:
7374 goto unimplemented;
7375 #ifdef TARGET_NR_mincore
7376 case TARGET_NR_mincore:
7377 {
7378 void *a;
7379 ret = -TARGET_EFAULT;
7380 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7381 goto efault;
7382 if (!(p = lock_user_string(arg3)))
7383 goto mincore_fail;
7384 ret = get_errno(mincore(a, arg2, p));
7385 unlock_user(p, arg3, ret);
7386 mincore_fail:
7387 unlock_user(a, arg1, 0);
7388 }
7389 break;
7390 #endif
7391 #ifdef TARGET_NR_arm_fadvise64_64
7392 case TARGET_NR_arm_fadvise64_64:
7393 {
7394 /*
7395 * arm_fadvise64_64 looks like fadvise64_64 but
7396 * with different argument order
7397 */
7398 abi_long temp;
7399 temp = arg3;
7400 arg3 = arg4;
7401 arg4 = temp;
7402 }
7403 #endif
7404 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7405 #ifdef TARGET_NR_fadvise64_64
7406 case TARGET_NR_fadvise64_64:
7407 #endif
7408 #ifdef TARGET_NR_fadvise64
7409 case TARGET_NR_fadvise64:
7410 #endif
7411 #ifdef TARGET_S390X
7412 switch (arg4) {
7413 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7414 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7415 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7416 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7417 default: break;
7418 }
7419 #endif
7420 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7421 break;
7422 #endif
7423 #ifdef TARGET_NR_madvise
7424 case TARGET_NR_madvise:
7425 /* A straight passthrough may not be safe because qemu sometimes
7426 turns private file-backed mappings into anonymous mappings.
7427 This will break MADV_DONTNEED.
7428 This is a hint, so ignoring and returning success is ok. */
7429 ret = get_errno(0);
7430 break;
7431 #endif
7432 #if TARGET_ABI_BITS == 32
7433 case TARGET_NR_fcntl64:
7434 {
7435 int cmd;
7436 struct flock64 fl;
7437 struct target_flock64 *target_fl;
7438 #ifdef TARGET_ARM
7439 struct target_eabi_flock64 *target_efl;
7440 #endif
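/* ARM EABI pads struct flock64 differently (64-bit members are 8-byte
   aligned), so it needs its own conversion path. */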
7441
7442 cmd = target_to_host_fcntl_cmd(arg2);
7443 if (cmd == -TARGET_EINVAL)
7444 return cmd;
7445
7446 switch(arg2) {
7447 case TARGET_F_GETLK64:
7448 #ifdef TARGET_ARM
7449 if (((CPUARMState *)cpu_env)->eabi) {
7450 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7451 goto efault;
7452 fl.l_type = tswap16(target_efl->l_type);
7453 fl.l_whence = tswap16(target_efl->l_whence);
7454 fl.l_start = tswap64(target_efl->l_start);
7455 fl.l_len = tswap64(target_efl->l_len);
7456 fl.l_pid = tswap32(target_efl->l_pid);
7457 unlock_user_struct(target_efl, arg3, 0);
7458 } else
7459 #endif
7460 {
7461 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7462 goto efault;
7463 fl.l_type = tswap16(target_fl->l_type);
7464 fl.l_whence = tswap16(target_fl->l_whence);
7465 fl.l_start = tswap64(target_fl->l_start);
7466 fl.l_len = tswap64(target_fl->l_len);
7467 fl.l_pid = tswap32(target_fl->l_pid);
7468 unlock_user_struct(target_fl, arg3, 0);
7469 }
7470 ret = get_errno(fcntl(arg1, cmd, &fl));
7471 if (ret == 0) {
7472 #ifdef TARGET_ARM
7473 if (((CPUARMState *)cpu_env)->eabi) {
7474 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7475 goto efault;
7476 target_efl->l_type = tswap16(fl.l_type);
7477 target_efl->l_whence = tswap16(fl.l_whence);
7478 target_efl->l_start = tswap64(fl.l_start);
7479 target_efl->l_len = tswap64(fl.l_len);
7480 target_efl->l_pid = tswap32(fl.l_pid);
7481 unlock_user_struct(target_efl, arg3, 1);
7482 } else
7483 #endif
7484 {
7485 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7486 goto efault;
7487 target_fl->l_type = tswap16(fl.l_type);
7488 target_fl->l_whence = tswap16(fl.l_whence);
7489 target_fl->l_start = tswap64(fl.l_start);
7490 target_fl->l_len = tswap64(fl.l_len);
7491 target_fl->l_pid = tswap32(fl.l_pid);
7492 unlock_user_struct(target_fl, arg3, 1);
7493 }
7494 }
7495 break;
7496
7497 case TARGET_F_SETLK64:
7498 case TARGET_F_SETLKW64:
7499 #ifdef TARGET_ARM
7500 if (((CPUARMState *)cpu_env)->eabi) {
7501 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7502 goto efault;
7503 fl.l_type = tswap16(target_efl->l_type);
7504 fl.l_whence = tswap16(target_efl->l_whence);
7505 fl.l_start = tswap64(target_efl->l_start);
7506 fl.l_len = tswap64(target_efl->l_len);
7507 fl.l_pid = tswap32(target_efl->l_pid);
7508 unlock_user_struct(target_efl, arg3, 0);
7509 } else
7510 #endif
7511 {
7512 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7513 goto efault;
7514 fl.l_type = tswap16(target_fl->l_type);
7515 fl.l_whence = tswap16(target_fl->l_whence);
7516 fl.l_start = tswap64(target_fl->l_start);
7517 fl.l_len = tswap64(target_fl->l_len);
7518 fl.l_pid = tswap32(target_fl->l_pid);
7519 unlock_user_struct(target_fl, arg3, 0);
7520 }
7521 ret = get_errno(fcntl(arg1, cmd, &fl));
7522 break;
7523 default:
7524 ret = do_fcntl(arg1, arg2, arg3);
7525 break;
7526 }
7527 break;
7528 }
7529 #endif
7530 #ifdef TARGET_NR_cacheflush
7531 case TARGET_NR_cacheflush:
7532 /* self-modifying code is handled automatically, so nothing needed */
7533 ret = 0;
7534 break;
7535 #endif
7536 #ifdef TARGET_NR_security
7537 case TARGET_NR_security:
7538 goto unimplemented;
7539 #endif
7540 #ifdef TARGET_NR_getpagesize
7541 case TARGET_NR_getpagesize:
7542 ret = TARGET_PAGE_SIZE;
7543 break;
7544 #endif
7545 case TARGET_NR_gettid:
7546 ret = get_errno(gettid());
7547 break;
7548 #ifdef TARGET_NR_readahead
7549 case TARGET_NR_readahead:
7550 #if TARGET_ABI_BITS == 32
7551 #ifdef TARGET_ARM
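/* EABI alignment of the 64-bit offset inserts a padding word, shifting
   the remaining arguments up by one register. */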
7552 if (((CPUARMState *)cpu_env)->eabi)
7553 {
7554 arg2 = arg3;
7555 arg3 = arg4;
7556 arg4 = arg5;
7557 }
7558 #endif
7559 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7560 #else
7561 ret = get_errno(readahead(arg1, arg2, arg3));
7562 #endif
7563 break;
7564 #endif
7565 #ifdef TARGET_NR_setxattr
7566 case TARGET_NR_setxattr:
7567 case TARGET_NR_lsetxattr:
7568 case TARGET_NR_fsetxattr:
7569 case TARGET_NR_getxattr:
7570 case TARGET_NR_lgetxattr:
7571 case TARGET_NR_fgetxattr:
7572 case TARGET_NR_listxattr:
7573 case TARGET_NR_llistxattr:
7574 case TARGET_NR_flistxattr:
7575 case TARGET_NR_removexattr:
7576 case TARGET_NR_lremovexattr:
7577 case TARGET_NR_fremovexattr:
7578 ret = -TARGET_EOPNOTSUPP;
7579 break;
7580 #endif
7581 #ifdef TARGET_NR_set_thread_area
7582 case TARGET_NR_set_thread_area:
7583 #if defined(TARGET_MIPS)
7584 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7585 ret = 0;
7586 break;
7587 #elif defined(TARGET_CRIS)
7588 if (arg1 & 0xff)
7589 ret = -TARGET_EINVAL;
7590 else {
7591 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7592 ret = 0;
7593 }
7594 break;
7595 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7596 ret = do_set_thread_area(cpu_env, arg1);
7597 break;
7598 #else
7599 goto unimplemented_nowarn;
7600 #endif
7601 #endif
7602 #ifdef TARGET_NR_get_thread_area
7603 case TARGET_NR_get_thread_area:
7604 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7605 ret = do_get_thread_area(cpu_env, arg1);
7606 #else
7607 goto unimplemented_nowarn;
7608 #endif
7609 #endif
7610 #ifdef TARGET_NR_getdomainname
7611 case TARGET_NR_getdomainname:
7612 goto unimplemented_nowarn;
7613 #endif
7614
7615 #ifdef TARGET_NR_clock_gettime
7616 case TARGET_NR_clock_gettime:
7617 {
7618 struct timespec ts;
7619 ret = get_errno(clock_gettime(arg1, &ts));
7620 if (!is_error(ret)) {
7621 host_to_target_timespec(arg2, &ts);
7622 }
7623 break;
7624 }
7625 #endif
7626 #ifdef TARGET_NR_clock_getres
7627 case TARGET_NR_clock_getres:
7628 {
7629 struct timespec ts;
7630 ret = get_errno(clock_getres(arg1, &ts));
7631 if (!is_error(ret)) {
7632 host_to_target_timespec(arg2, &ts);
7633 }
7634 break;
7635 }
7636 #endif
7637 #ifdef TARGET_NR_clock_nanosleep
7638 case TARGET_NR_clock_nanosleep:
7639 {
7640 struct timespec ts;
7641 target_to_host_timespec(&ts, arg3);
7642 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7643 if (arg4)
7644 host_to_target_timespec(arg4, &ts);
7645 break;
7646 }
7647 #endif
7648
7649 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7650 case TARGET_NR_set_tid_address:
7651 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7652 break;
7653 #endif
7654
7655 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7656 case TARGET_NR_tkill:
7657 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7658 break;
7659 #endif
7660
7661 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7662 case TARGET_NR_tgkill:
7663 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7664 target_to_host_signal(arg3)));
7665 break;
7666 #endif
7667
7668 #ifdef TARGET_NR_set_robust_list
7669 case TARGET_NR_set_robust_list:
7670 goto unimplemented_nowarn;
7671 #endif
7672
7673 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7674 case TARGET_NR_utimensat:
7675 {
7676 struct timespec *tsp, ts[2];
7677 if (!arg3) {
7678 tsp = NULL;
7679 } else {
7680 target_to_host_timespec(ts, arg3);
7681 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
7682 tsp = ts;
7683 }
7684 if (!arg2)
7685 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7686 else {
7687 if (!(p = lock_user_string(arg2))) {
7688 ret = -TARGET_EFAULT;
7689 goto fail;
7690 }
7691 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7692 unlock_user(p, arg2, 0);
7693 }
7694 }
7695 break;
7696 #endif
7697 #if defined(CONFIG_USE_NPTL)
7698 case TARGET_NR_futex:
7699 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7700 break;
7701 #endif
7702 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7703 case TARGET_NR_inotify_init:
7704 ret = get_errno(sys_inotify_init());
7705 break;
7706 #endif
7707 #ifdef CONFIG_INOTIFY1
7708 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7709 case TARGET_NR_inotify_init1:
7710 ret = get_errno(sys_inotify_init1(arg1));
7711 break;
7712 #endif
7713 #endif
7714 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7715 case TARGET_NR_inotify_add_watch:
7716 p = lock_user_string(arg2);
7717 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7718 unlock_user(p, arg2, 0);
7719 break;
7720 #endif
7721 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7722 case TARGET_NR_inotify_rm_watch:
7723 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7724 break;
7725 #endif
7726
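/* POSIX message queues.  struct mq_attr is translated with
 * copy_from_user_mq_attr()/copy_to_user_mq_attr().  The queue name
 * is locked starting at arg1 - 1 because the guest's libc strips
 * the leading '/' before trapping; backing up one byte recovers it
 * so the host mq_open() sees a conforming name.
 */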
7727 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7728 case TARGET_NR_mq_open:
7729 {
7730 struct mq_attr posix_mq_attr, *attrp;
7731
7732 p = lock_user_string(arg1 - 1);
7733 if (arg4 != 0) {
7734 copy_from_user_mq_attr(&posix_mq_attr, arg4);
attrp = &posix_mq_attr;
} else {
attrp = NULL;
}
7735 ret = get_errno(mq_open(p, arg2, arg3, attrp));
7736 unlock_user (p, arg1, 0);
7737 }
7738 break;
7739
7740 case TARGET_NR_mq_unlink:
7741 p = lock_user_string(arg1 - 1);
7742 ret = get_errno(mq_unlink(p));
7743 unlock_user (p, arg1, 0);
7744 break;
7745
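/* mq_timedsend/mq_timedreceive: a NULL timeout in arg5 selects the
 * plain mq_send()/mq_receive() path; otherwise the absolute timeout
 * is converted to a host timespec and written back afterwards.
 */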
7746 case TARGET_NR_mq_timedsend:
7747 {
7748 struct timespec ts;
7749
7750 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7751 if (arg5 != 0) {
7752 target_to_host_timespec(&ts, arg5);
7753 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7754 host_to_target_timespec(arg5, &ts);
7755 }
7756 else
7757 ret = get_errno(mq_send(arg1, p, arg3, arg4));
7758 unlock_user (p, arg2, arg3);
7759 }
7760 break;
7761
7762 case TARGET_NR_mq_timedreceive:
7763 {
7764 struct timespec ts;
7765 unsigned int prio;
7766
7767 p = lock_user (VERIFY_WRITE, arg2, arg3, 0);
7768 if (arg5 != 0) {
7769 target_to_host_timespec(&ts, arg5);
7770 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
7771 host_to_target_timespec(arg5, &ts);
7772 }
7773 else
7774 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
7775 unlock_user (p, arg2, arg3);
7776 if (arg4 != 0)
7777 put_user_u32(prio, arg4);
7778 }
7779 break;
7780
7781 /* Not implemented for now... */
7782 /* case TARGET_NR_mq_notify: */
7783 /* break; */
7784
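/* mq_getsetattr is emulated with separate mq_getattr()/mq_setattr()
 * library calls: old attributes are copied out when arg3 is set,
 * new ones copied in when arg2 is set.
 */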
7785 case TARGET_NR_mq_getsetattr:
7786 {
7787 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
7788 ret = 0;
7789 if (arg3 != 0) {
7790 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
7791 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
7792 }
7793 if (arg2 != 0) {
7794 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
7795 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out));
7796 }
7797
7798 }
7799 break;
7800 #endif
7801
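/* splice family: the optional 64-bit in/out offsets are fetched from
 * guest memory with get_user_u64(); vmsplice translates its iovec
 * array with lock_iovec().
 */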
7802 #ifdef CONFIG_SPLICE
7803 #ifdef TARGET_NR_tee
7804 case TARGET_NR_tee:
7805 {
7806 ret = get_errno(tee(arg1,arg2,arg3,arg4));
7807 }
7808 break;
7809 #endif
7810 #ifdef TARGET_NR_splice
7811 case TARGET_NR_splice:
7812 {
7813 loff_t loff_in, loff_out;
7814 loff_t *ploff_in = NULL, *ploff_out = NULL;
7815 if(arg2) {
7816 get_user_u64(loff_in, arg2);
7817 ploff_in = &loff_in;
7818 }
7819 if(arg4) {
7820 get_user_u64(loff_out, arg4);
7821 ploff_out = &loff_out;
7822 }
7823 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
if (!is_error(ret)) {
if (arg2)
put_user_u64(loff_in, arg2);
if (arg4)
put_user_u64(loff_out, arg4);
}
7824 }
7825 break;
7826 #endif
7827 #ifdef TARGET_NR_vmsplice
7828 case TARGET_NR_vmsplice:
7829 {
7830 int count = arg3;
7831 struct iovec *vec;
7832
7833 vec = alloca(count * sizeof(struct iovec));
7834 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7835 goto efault;
7836 ret = get_errno(vmsplice(arg1, vec, count, arg4));
7837 unlock_iovec(vec, arg2, count, 0);
7838 }
7839 break;
7840 #endif
7841 #endif /* CONFIG_SPLICE */
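/* eventfd/eventfd2 map directly onto the host eventfd(); the legacy
 * one-argument form passes a flags value of 0.
 */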
7842 #ifdef CONFIG_EVENTFD
7843 #if defined(TARGET_NR_eventfd)
7844 case TARGET_NR_eventfd:
7845 ret = get_errno(eventfd(arg1, 0));
7846 break;
7847 #endif
7848 #if defined(TARGET_NR_eventfd2)
7849 case TARGET_NR_eventfd2:
7850 ret = get_errno(eventfd(arg1, arg2));
7851 break;
7852 #endif
7853 #endif /* CONFIG_EVENTFD */
7854 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7855 case TARGET_NR_fallocate:
7856 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
7857 break;
7858 #endif
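/* sync_file_range: on 32-bit guest ABIs the 64-bit offset and nbytes
 * arguments arrive split across register pairs and are reassembled
 * with target_offset64().  sync_file_range2 passes the flags as the
 * second argument so that the 64-bit pairs stay register-aligned,
 * hence the reordered host call.
 */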
7859 #if defined(CONFIG_SYNC_FILE_RANGE)
7860 #if defined(TARGET_NR_sync_file_range)
7861 case TARGET_NR_sync_file_range:
7862 #if TARGET_ABI_BITS == 32
7863 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
7864 target_offset64(arg4, arg5), arg6));
7865 #else
7866 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
7867 #endif
7868 break;
7869 #endif
7870 #if defined(TARGET_NR_sync_file_range2)
7871 case TARGET_NR_sync_file_range2:
7872 /* This is like sync_file_range but the arguments are reordered */
7873 #if TARGET_ABI_BITS == 32
7874 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7875 target_offset64(arg5, arg6), arg2));
7876 #else
7877 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
7878 #endif
7879 break;
7880 #endif
7881 #endif
7882 #if defined(CONFIG_EPOLL)
7883 #if defined(TARGET_NR_epoll_create)
7884 case TARGET_NR_epoll_create:
7885 ret = get_errno(epoll_create(arg1));
7886 break;
7887 #endif
7888 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7889 case TARGET_NR_epoll_create1:
7890 ret = get_errno(epoll_create1(arg1));
7891 break;
7892 #endif
7893 #if defined(TARGET_NR_epoll_ctl)
7894 case TARGET_NR_epoll_ctl:
7895 {
7896 struct epoll_event ep;
7897 struct epoll_event *epp = 0;
7898 if (arg4) {
7899 struct target_epoll_event *target_ep;
7900 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
7901 goto efault;
7902 }
7903 ep.events = tswap32(target_ep->events);
7904 /* The epoll_data_t union is just opaque data to the kernel,
7905 * so we transfer all 64 bits across and need not worry what
7906 * actual data type it is.
7907 */
7908 ep.data.u64 = tswap64(target_ep->data.u64);
7909 unlock_user_struct(target_ep, arg4, 0);
7910 epp = &ep;
7911 }
7912 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
7913 break;
7914 }
7915 #endif
7916
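/* epoll_wait and epoll_pwait share one body, dispatched on num.
 * Host events are copied back into the guest's target_epoll_event
 * array with the events mask byte-swapped and data.u64 carried
 * across unchanged; for epoll_pwait an optional guest sigset in
 * arg5 is converted with target_to_host_sigset().
 */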
7917 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7918 #define IMPLEMENT_EPOLL_PWAIT
7919 #endif
7920 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7921 #if defined(TARGET_NR_epoll_wait)
7922 case TARGET_NR_epoll_wait:
7923 #endif
7924 #if defined(IMPLEMENT_EPOLL_PWAIT)
7925 case TARGET_NR_epoll_pwait:
7926 #endif
7927 {
7928 struct target_epoll_event *target_ep;
7929 struct epoll_event *ep;
7930 int epfd = arg1;
7931 int maxevents = arg3;
7932 int timeout = arg4;
7933
7934 target_ep = lock_user(VERIFY_WRITE, arg2,
7935 maxevents * sizeof(struct target_epoll_event), 1);
7936 if (!target_ep) {
7937 goto efault;
7938 }
7939
7940 ep = alloca(maxevents * sizeof(struct epoll_event));
7941
7942 switch (num) {
7943 #if defined(IMPLEMENT_EPOLL_PWAIT)
7944 case TARGET_NR_epoll_pwait:
7945 {
7946 target_sigset_t *target_set;
7947 sigset_t _set, *set = &_set;
7948
7949 if (arg5) {
7950 target_set = lock_user(VERIFY_READ, arg5,
7951 sizeof(target_sigset_t), 1);
7952 if (!target_set) {
7953 unlock_user(target_ep, arg2, 0);
7954 goto efault;
7955 }
7956 target_to_host_sigset(set, target_set);
7957 unlock_user(target_set, arg5, 0);
7958 } else {
7959 set = NULL;
7960 }
7961
7962 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
7963 break;
7964 }
7965 #endif
7966 #if defined(TARGET_NR_epoll_wait)
7967 case TARGET_NR_epoll_wait:
7968 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
7969 break;
7970 #endif
7971 default:
7972 ret = -TARGET_ENOSYS;
7973 }
7974 if (!is_error(ret)) {
7975 int i;
7976 for (i = 0; i < ret; i++) {
7977 target_ep[i].events = tswap32(ep[i].events);
7978 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
7979 }
7980 }
7981 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
7982 break;
7983 }
7984 #endif
7985 #endif
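/* Anything not handled above is logged and fails with
 * -TARGET_ENOSYS; unimplemented_nowarn gives the same result
 * without the log message for syscalls we knowingly leave
 * unimplemented.
 */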
7986 default:
7987 unimplemented:
7988 gemu_log("qemu: Unsupported syscall: %d\n", num);
7989 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
7990 unimplemented_nowarn:
7991 #endif
7992 ret = -TARGET_ENOSYS;
7993 break;
7994 }
7995 fail:
7996 #ifdef DEBUG
7997 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
7998 #endif
7999 if(do_strace)
8000 print_syscall_ret(num, ret);
8001 return ret;
8002 efault:
8003 ret = -TARGET_EFAULT;
8004 goto fail;
8005 }