qemu.git (git.proxmox.com): linux-user/syscall.c
Commit: linux-user: Implement prlimit64 syscall
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include <qemu-common.h>
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
73
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
80
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/utsname.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #include <linux/vt.h>
95 #include "linux_loop.h"
96 #include "cpu-uname.h"
97
98 #include "qemu.h"
99 #include "qemu-common.h"
100
101 #if defined(CONFIG_USE_NPTL)
102 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
103 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
104 #else
105 /* XXX: Hardcode the above values. */
106 #define CLONE_NPTL_FLAGS2 0
107 #endif
108
109 //#define DEBUG
110
111 //#include <linux/msdos_fs.h>
112 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
113 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
114
115
116 #undef _syscall0
117 #undef _syscall1
118 #undef _syscall2
119 #undef _syscall3
120 #undef _syscall4
121 #undef _syscall5
122 #undef _syscall6
123
124 #define _syscall0(type,name) \
125 static type name (void) \
126 { \
127 return syscall(__NR_##name); \
128 }
129
130 #define _syscall1(type,name,type1,arg1) \
131 static type name (type1 arg1) \
132 { \
133 return syscall(__NR_##name, arg1); \
134 }
135
136 #define _syscall2(type,name,type1,arg1,type2,arg2) \
137 static type name (type1 arg1,type2 arg2) \
138 { \
139 return syscall(__NR_##name, arg1, arg2); \
140 }
141
142 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
143 static type name (type1 arg1,type2 arg2,type3 arg3) \
144 { \
145 return syscall(__NR_##name, arg1, arg2, arg3); \
146 }
147
148 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
149 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
150 { \
151 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
152 }
153
154 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
155 type5,arg5) \
156 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
157 { \
158 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
159 }
160
161
162 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5,type6,arg6) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
165 type6 arg6) \
166 { \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
168 }
169
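/*
 * These _syscallN macros generate thin wrappers that invoke the host
 * syscall directly via syscall(2), bypassing any glibc wrapper of the same
 * name. For example, the _syscall3 invocation used for sys_getdents below
 * expands roughly to:
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * The __NR_sys_* aliases defined next make that token-pasted name resolve
 * to the real __NR_* syscall number.
 */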
170
171 #define __NR_sys_uname __NR_uname
172 #define __NR_sys_faccessat __NR_faccessat
173 #define __NR_sys_fchmodat __NR_fchmodat
174 #define __NR_sys_fchownat __NR_fchownat
175 #define __NR_sys_fstatat64 __NR_fstatat64
176 #define __NR_sys_futimesat __NR_futimesat
177 #define __NR_sys_getcwd1 __NR_getcwd
178 #define __NR_sys_getdents __NR_getdents
179 #define __NR_sys_getdents64 __NR_getdents64
180 #define __NR_sys_getpriority __NR_getpriority
181 #define __NR_sys_linkat __NR_linkat
182 #define __NR_sys_mkdirat __NR_mkdirat
183 #define __NR_sys_mknodat __NR_mknodat
184 #define __NR_sys_newfstatat __NR_newfstatat
185 #define __NR_sys_openat __NR_openat
186 #define __NR_sys_readlinkat __NR_readlinkat
187 #define __NR_sys_renameat __NR_renameat
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_symlinkat __NR_symlinkat
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_unlinkat __NR_unlinkat
194 #define __NR_sys_utimensat __NR_utimensat
195 #define __NR_sys_futex __NR_futex
196 #define __NR_sys_inotify_init __NR_inotify_init
197 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
198 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
199
200 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
201 defined(__s390x__)
202 #define __NR__llseek __NR_lseek
203 #endif
204
205 #ifdef __NR_gettid
206 _syscall0(int, gettid)
207 #else
208 /* This is a replacement for the host gettid() and must return a host
209 errno. */
210 static int gettid(void) {
211 return -ENOSYS;
212 }
213 #endif
214 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
215 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
216 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
217 #endif
218 _syscall2(int, sys_getpriority, int, which, int, who);
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
221 loff_t *, res, uint, wh);
222 #endif
223 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
224 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
225 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
226 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
227 #endif
228 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
229 _syscall2(int,sys_tkill,int,tid,int,sig)
230 #endif
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
233 #endif
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
236 #endif
237 #if defined(CONFIG_USE_NPTL)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
240 const struct timespec *,timeout,int *,uaddr2,int,val3)
241 #endif
242 #endif
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
249
250 static bitmask_transtbl fcntl_flags_tbl[] = {
251 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
252 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
253 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
254 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
255 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
256 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
257 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
258 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
259 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
260 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
261 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
262 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
263 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
264 #if defined(O_DIRECT)
265 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
266 #endif
267 { 0, 0, 0, 0 }
268 };
269
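/*
 * fcntl_flags_tbl maps open(2)/fcntl(2) flag bits between the target ABI
 * encoding and the host encoding; on some architectures the TARGET_O_*
 * values differ from the host O_* values. Each row is a target mask/bits
 * pair plus the matching host mask/bits pair, and the all-zero row
 * terminates the table. It is consumed by the generic bitmask translator,
 * e.g. (sketch using the existing helper):
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 */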
270 #define COPY_UTSNAME_FIELD(dest, src) \
271 do { \
272 /* __NEW_UTS_LEN doesn't include terminating null */ \
273 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
274 (dest)[__NEW_UTS_LEN] = '\0'; \
275 } while (0)
276
277 static int sys_uname(struct new_utsname *buf)
278 {
279 struct utsname uts_buf;
280
281 if (uname(&uts_buf) < 0)
282 return (-1);
283
284 /*
285 * Just in case these have some differences, we
286 * translate utsname to new_utsname (which is the
287 * struct the Linux kernel uses).
288 */
289
290 memset(buf, 0, sizeof(*buf));
291 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
292 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
293 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
294 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
295 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
296 #ifdef _GNU_SOURCE
297 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
298 #endif
299 return (0);
300
301 #undef COPY_UTSNAME_FIELD
302 }
303
304 static int sys_getcwd1(char *buf, size_t size)
305 {
306 if (getcwd(buf, size) == NULL) {
307 /* getcwd() sets errno */
308 return (-1);
309 }
310 return strlen(buf)+1;
311 }
312
313 #ifdef CONFIG_ATFILE
314 /*
315 * The host system seems to have the atfile syscall stubs available. We
316 * enable them one by one as specified by the target's syscall_nr.h.
317 */
318
319 #ifdef TARGET_NR_faccessat
320 static int sys_faccessat(int dirfd, const char *pathname, int mode)
321 {
322 return (faccessat(dirfd, pathname, mode, 0));
323 }
324 #endif
325 #ifdef TARGET_NR_fchmodat
326 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
327 {
328 return (fchmodat(dirfd, pathname, mode, 0));
329 }
330 #endif
331 #if defined(TARGET_NR_fchownat)
332 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
333 gid_t group, int flags)
334 {
335 return (fchownat(dirfd, pathname, owner, group, flags));
336 }
337 #endif
338 #ifdef __NR_fstatat64
339 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
340 int flags)
341 {
342 return (fstatat(dirfd, pathname, buf, flags));
343 }
344 #endif
345 #ifdef __NR_newfstatat
346 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
347 int flags)
348 {
349 return (fstatat(dirfd, pathname, buf, flags));
350 }
351 #endif
352 #ifdef TARGET_NR_futimesat
353 static int sys_futimesat(int dirfd, const char *pathname,
354 const struct timeval times[2])
355 {
356 return (futimesat(dirfd, pathname, times));
357 }
358 #endif
359 #ifdef TARGET_NR_linkat
360 static int sys_linkat(int olddirfd, const char *oldpath,
361 int newdirfd, const char *newpath, int flags)
362 {
363 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
364 }
365 #endif
366 #ifdef TARGET_NR_mkdirat
367 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
368 {
369 return (mkdirat(dirfd, pathname, mode));
370 }
371 #endif
372 #ifdef TARGET_NR_mknodat
373 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
374 dev_t dev)
375 {
376 return (mknodat(dirfd, pathname, mode, dev));
377 }
378 #endif
379 #ifdef TARGET_NR_openat
380 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
381 {
382 /*
383 * open(2) takes an extra 'mode' parameter when it is called
384 * with the O_CREAT flag.
385 */
386 if ((flags & O_CREAT) != 0) {
387 va_list ap;
388 mode_t mode;
389
390 /*
391 * Get the 'mode' parameter and translate it to
392 * host bits.
393 */
394 va_start(ap, flags);
395 mode = va_arg(ap, mode_t);
396 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
397 va_end(ap);
398
399 return (openat(dirfd, pathname, flags, mode));
400 }
401 return (openat(dirfd, pathname, flags));
402 }
403 #endif
404 #ifdef TARGET_NR_readlinkat
405 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
406 {
407 return (readlinkat(dirfd, pathname, buf, bufsiz));
408 }
409 #endif
410 #ifdef TARGET_NR_renameat
411 static int sys_renameat(int olddirfd, const char *oldpath,
412 int newdirfd, const char *newpath)
413 {
414 return (renameat(olddirfd, oldpath, newdirfd, newpath));
415 }
416 #endif
417 #ifdef TARGET_NR_symlinkat
418 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
419 {
420 return (symlinkat(oldpath, newdirfd, newpath));
421 }
422 #endif
423 #ifdef TARGET_NR_unlinkat
424 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
425 {
426 return (unlinkat(dirfd, pathname, flags));
427 }
428 #endif
429 #else /* !CONFIG_ATFILE */
430
431 /*
432 * Try direct syscalls instead
433 */
434 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
435 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
436 #endif
437 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
438 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
439 #endif
440 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
441 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
442 uid_t,owner,gid_t,group,int,flags)
443 #endif
444 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
445 defined(__NR_fstatat64)
446 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
447 struct stat *,buf,int,flags)
448 #endif
449 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
450 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
451 const struct timeval *,times)
452 #endif
453 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
454 defined(__NR_newfstatat)
455 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
456 struct stat *,buf,int,flags)
457 #endif
458 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
459 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
460 int,newdirfd,const char *,newpath,int,flags)
461 #endif
462 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
463 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
464 #endif
465 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
466 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
467 mode_t,mode,dev_t,dev)
468 #endif
469 #if defined(TARGET_NR_openat) && defined(__NR_openat)
470 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
471 #endif
472 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
473 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
474 char *,buf,size_t,bufsize)
475 #endif
476 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
477 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
478 int,newdirfd,const char *,newpath)
479 #endif
480 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
481 _syscall3(int,sys_symlinkat,const char *,oldpath,
482 int,newdirfd,const char *,newpath)
483 #endif
484 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
485 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
486 #endif
487
488 #endif /* CONFIG_ATFILE */
489
490 #ifdef CONFIG_UTIMENSAT
491 static int sys_utimensat(int dirfd, const char *pathname,
492 const struct timespec times[2], int flags)
493 {
494 if (pathname == NULL)
495 return futimens(dirfd, times);
496 else
497 return utimensat(dirfd, pathname, times, flags);
498 }
499 #else
500 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
501 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
502 const struct timespec *,tsp,int,flags)
503 #endif
504 #endif /* CONFIG_UTIMENSAT */
505
506 #ifdef CONFIG_INOTIFY
507 #include <sys/inotify.h>
508
509 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
510 static int sys_inotify_init(void)
511 {
512 return (inotify_init());
513 }
514 #endif
515 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
516 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
517 {
518 return (inotify_add_watch(fd, pathname, mask));
519 }
520 #endif
521 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
522 static int sys_inotify_rm_watch(int fd, int32_t wd)
523 {
524 return (inotify_rm_watch(fd, wd));
525 }
526 #endif
527 #ifdef CONFIG_INOTIFY1
528 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
529 static int sys_inotify_init1(int flags)
530 {
531 return (inotify_init1(flags));
532 }
533 #endif
534 #endif
535 #else
536 /* Userspace can usually survive at runtime without inotify */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY */
542
543 #if defined(TARGET_NR_ppoll)
544 #ifndef __NR_ppoll
545 # define __NR_ppoll -1
546 #endif
547 #define __NR_sys_ppoll __NR_ppoll
548 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
549 struct timespec *, timeout, const __sigset_t *, sigmask,
550 size_t, sigsetsize)
551 #endif
552
553 #if defined(TARGET_NR_pselect6)
554 #ifndef __NR_pselect6
555 # define __NR_pselect6 -1
556 #endif
557 #define __NR_sys_pselect6 __NR_pselect6
558 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
559 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
560 #endif
561
562 #if defined(TARGET_NR_prlimit64)
563 #ifndef __NR_prlimit64
564 # define __NR_prlimit64 -1
565 #endif
566 #define __NR_sys_prlimit64 __NR_prlimit64
567 /* The glibc rlimit structure may not be the one used by the underlying syscall */
568 struct host_rlimit64 {
569 uint64_t rlim_cur;
570 uint64_t rlim_max;
571 };
572 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
573 const struct host_rlimit64 *, new_limit,
574 struct host_rlimit64 *, old_limit)
575 #endif
576
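/*
 * host_rlimit64 mirrors the fixed 64-bit layout expected by the raw
 * prlimit64 syscall, which need not match glibc's own rlimit types. A
 * caller of sys_prlimit64() is expected to convert the guest limits into
 * this layout first; a minimal sketch (the target_rnew/target_rold names
 * are illustrative, not definitions from this file):
 *
 *     struct host_rlimit64 rnew, rold;
 *     rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
 *     rnew.rlim_max = tswap64(target_rnew->rlim_max);
 *     ret = get_errno(sys_prlimit64(pid, resource, &rnew, &rold));
 */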
577 extern int personality(int);
578 extern int flock(int, int);
579 extern int setfsuid(int);
580 extern int setfsgid(int);
581 extern int setgroups(int, gid_t *);
582
583 #define ERRNO_TABLE_SIZE 1200
584
585 /* target_to_host_errno_table[] is initialized from
586 * host_to_target_errno_table[] in syscall_init(). */
587 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
588 };
589
590 /*
591 * This list is the union of errno values overridden in asm-<arch>/errno.h
592 * minus the errnos that are not actually generic to all archs.
593 */
594 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
595 [EIDRM] = TARGET_EIDRM,
596 [ECHRNG] = TARGET_ECHRNG,
597 [EL2NSYNC] = TARGET_EL2NSYNC,
598 [EL3HLT] = TARGET_EL3HLT,
599 [EL3RST] = TARGET_EL3RST,
600 [ELNRNG] = TARGET_ELNRNG,
601 [EUNATCH] = TARGET_EUNATCH,
602 [ENOCSI] = TARGET_ENOCSI,
603 [EL2HLT] = TARGET_EL2HLT,
604 [EDEADLK] = TARGET_EDEADLK,
605 [ENOLCK] = TARGET_ENOLCK,
606 [EBADE] = TARGET_EBADE,
607 [EBADR] = TARGET_EBADR,
608 [EXFULL] = TARGET_EXFULL,
609 [ENOANO] = TARGET_ENOANO,
610 [EBADRQC] = TARGET_EBADRQC,
611 [EBADSLT] = TARGET_EBADSLT,
612 [EBFONT] = TARGET_EBFONT,
613 [ENOSTR] = TARGET_ENOSTR,
614 [ENODATA] = TARGET_ENODATA,
615 [ETIME] = TARGET_ETIME,
616 [ENOSR] = TARGET_ENOSR,
617 [ENONET] = TARGET_ENONET,
618 [ENOPKG] = TARGET_ENOPKG,
619 [EREMOTE] = TARGET_EREMOTE,
620 [ENOLINK] = TARGET_ENOLINK,
621 [EADV] = TARGET_EADV,
622 [ESRMNT] = TARGET_ESRMNT,
623 [ECOMM] = TARGET_ECOMM,
624 [EPROTO] = TARGET_EPROTO,
625 [EDOTDOT] = TARGET_EDOTDOT,
626 [EMULTIHOP] = TARGET_EMULTIHOP,
627 [EBADMSG] = TARGET_EBADMSG,
628 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
629 [EOVERFLOW] = TARGET_EOVERFLOW,
630 [ENOTUNIQ] = TARGET_ENOTUNIQ,
631 [EBADFD] = TARGET_EBADFD,
632 [EREMCHG] = TARGET_EREMCHG,
633 [ELIBACC] = TARGET_ELIBACC,
634 [ELIBBAD] = TARGET_ELIBBAD,
635 [ELIBSCN] = TARGET_ELIBSCN,
636 [ELIBMAX] = TARGET_ELIBMAX,
637 [ELIBEXEC] = TARGET_ELIBEXEC,
638 [EILSEQ] = TARGET_EILSEQ,
639 [ENOSYS] = TARGET_ENOSYS,
640 [ELOOP] = TARGET_ELOOP,
641 [ERESTART] = TARGET_ERESTART,
642 [ESTRPIPE] = TARGET_ESTRPIPE,
643 [ENOTEMPTY] = TARGET_ENOTEMPTY,
644 [EUSERS] = TARGET_EUSERS,
645 [ENOTSOCK] = TARGET_ENOTSOCK,
646 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
647 [EMSGSIZE] = TARGET_EMSGSIZE,
648 [EPROTOTYPE] = TARGET_EPROTOTYPE,
649 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
650 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
651 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
652 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
653 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
654 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
655 [EADDRINUSE] = TARGET_EADDRINUSE,
656 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
657 [ENETDOWN] = TARGET_ENETDOWN,
658 [ENETUNREACH] = TARGET_ENETUNREACH,
659 [ENETRESET] = TARGET_ENETRESET,
660 [ECONNABORTED] = TARGET_ECONNABORTED,
661 [ECONNRESET] = TARGET_ECONNRESET,
662 [ENOBUFS] = TARGET_ENOBUFS,
663 [EISCONN] = TARGET_EISCONN,
664 [ENOTCONN] = TARGET_ENOTCONN,
665 [EUCLEAN] = TARGET_EUCLEAN,
666 [ENOTNAM] = TARGET_ENOTNAM,
667 [ENAVAIL] = TARGET_ENAVAIL,
668 [EISNAM] = TARGET_EISNAM,
669 [EREMOTEIO] = TARGET_EREMOTEIO,
670 [ESHUTDOWN] = TARGET_ESHUTDOWN,
671 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
672 [ETIMEDOUT] = TARGET_ETIMEDOUT,
673 [ECONNREFUSED] = TARGET_ECONNREFUSED,
674 [EHOSTDOWN] = TARGET_EHOSTDOWN,
675 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
676 [EALREADY] = TARGET_EALREADY,
677 [EINPROGRESS] = TARGET_EINPROGRESS,
678 [ESTALE] = TARGET_ESTALE,
679 [ECANCELED] = TARGET_ECANCELED,
680 [ENOMEDIUM] = TARGET_ENOMEDIUM,
681 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
682 #ifdef ENOKEY
683 [ENOKEY] = TARGET_ENOKEY,
684 #endif
685 #ifdef EKEYEXPIRED
686 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
687 #endif
688 #ifdef EKEYREVOKED
689 [EKEYREVOKED] = TARGET_EKEYREVOKED,
690 #endif
691 #ifdef EKEYREJECTED
692 [EKEYREJECTED] = TARGET_EKEYREJECTED,
693 #endif
694 #ifdef EOWNERDEAD
695 [EOWNERDEAD] = TARGET_EOWNERDEAD,
696 #endif
697 #ifdef ENOTRECOVERABLE
698 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
699 #endif
700 };
701
702 static inline int host_to_target_errno(int err)
703 {
704 if(host_to_target_errno_table[err])
705 return host_to_target_errno_table[err];
706 return err;
707 }
708
709 static inline int target_to_host_errno(int err)
710 {
711 if (target_to_host_errno_table[err])
712 return target_to_host_errno_table[err];
713 return err;
714 }
715
716 static inline abi_long get_errno(abi_long ret)
717 {
718 if (ret == -1)
719 return -host_to_target_errno(errno);
720 else
721 return ret;
722 }
723
724 static inline int is_error(abi_long ret)
725 {
726 return (abi_ulong)ret >= (abi_ulong)(-4096);
727 }
728
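/*
 * Error handling convention: a failing host libc call returns -1 and sets
 * errno; get_errno() folds that into a negative target errno, so callers
 * can simply write, for example:
 *
 *     ret = get_errno(open(path, flags));
 *
 * and a host ENOENT comes back to the guest as -TARGET_ENOENT. is_error()
 * then recognises such results by checking for values in the last 4096
 * negative integers, mirroring the kernel's syscall return convention.
 */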
729 char *target_strerror(int err)
730 {
731 return strerror(target_to_host_errno(err));
732 }
733
734 static abi_ulong target_brk;
735 static abi_ulong target_original_brk;
736 static abi_ulong brk_page;
737
738 void target_set_brk(abi_ulong new_brk)
739 {
740 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
741 brk_page = HOST_PAGE_ALIGN(target_brk);
742 }
743
744 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
745 #define DEBUGF_BRK(message, args...)
746
747 /* do_brk() must return target values and target errnos. */
748 abi_long do_brk(abi_ulong new_brk)
749 {
750 abi_long mapped_addr;
751 int new_alloc_size;
752
753 DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);
754
755 if (!new_brk) {
756 DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
757 return target_brk;
758 }
759 if (new_brk < target_original_brk) {
760 DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
761 return target_brk;
762 }
763
764 /* If the new brk is less than the highest page reserved to the
765 * target heap allocation, set it and we're almost done... */
766 if (new_brk <= brk_page) {
767 /* Heap contents are initialized to zero, as for anonymous
768 * mapped pages. */
769 if (new_brk > target_brk) {
770 memset(g2h(target_brk), 0, new_brk - target_brk);
771 }
772 target_brk = new_brk;
773 DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
774 return target_brk;
775 }
776
777 /* We need to allocate more memory after the brk... Note that
778 * we don't use MAP_FIXED because that will map over the top of
779 * any existing mapping (like the one with the host libc or qemu
780 * itself); instead we treat "mapped but at wrong address" as
781 * a failure and unmap again.
782 */
783 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
784 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
785 PROT_READ|PROT_WRITE,
786 MAP_ANON|MAP_PRIVATE, 0, 0));
787
788 if (mapped_addr == brk_page) {
789 target_brk = new_brk;
790 brk_page = HOST_PAGE_ALIGN(target_brk);
791 DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
792 return target_brk;
793 } else if (mapped_addr != -1) {
794 /* Mapped but at wrong address, meaning there wasn't actually
795 * enough space for this brk.
796 */
797 target_munmap(mapped_addr, new_alloc_size);
798 mapped_addr = -1;
799 DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
800 }
801 else {
802 DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
803 }
804
805 #if defined(TARGET_ALPHA)
806 /* We (partially) emulate OSF/1 on Alpha, which requires we
807 return a proper errno, not an unchanged brk value. */
808 return -TARGET_ENOMEM;
809 #endif
810 /* For everything else, return the previous break. */
811 return target_brk;
812 }
813
814 static inline abi_long copy_from_user_fdset(fd_set *fds,
815 abi_ulong target_fds_addr,
816 int n)
817 {
818 int i, nw, j, k;
819 abi_ulong b, *target_fds;
820
821 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
822 if (!(target_fds = lock_user(VERIFY_READ,
823 target_fds_addr,
824 sizeof(abi_ulong) * nw,
825 1)))
826 return -TARGET_EFAULT;
827
828 FD_ZERO(fds);
829 k = 0;
830 for (i = 0; i < nw; i++) {
831 /* grab the abi_ulong */
832 __get_user(b, &target_fds[i]);
833 for (j = 0; j < TARGET_ABI_BITS; j++) {
834 /* check the bit inside the abi_ulong */
835 if ((b >> j) & 1)
836 FD_SET(k, fds);
837 k++;
838 }
839 }
840
841 unlock_user(target_fds, target_fds_addr, 0);
842
843 return 0;
844 }
845
846 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
847 abi_ulong target_fds_addr,
848 int n)
849 {
850 if (target_fds_addr) {
851 if (copy_from_user_fdset(fds, target_fds_addr, n))
852 return -TARGET_EFAULT;
853 *fds_ptr = fds;
854 } else {
855 *fds_ptr = NULL;
856 }
857 return 0;
858 }
859
860 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
861 const fd_set *fds,
862 int n)
863 {
864 int i, nw, j, k;
865 abi_long v;
866 abi_ulong *target_fds;
867
868 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
869 if (!(target_fds = lock_user(VERIFY_WRITE,
870 target_fds_addr,
871 sizeof(abi_ulong) * nw,
872 0)))
873 return -TARGET_EFAULT;
874
875 k = 0;
876 for (i = 0; i < nw; i++) {
877 v = 0;
878 for (j = 0; j < TARGET_ABI_BITS; j++) {
879 v |= ((FD_ISSET(k, fds) != 0) << j);
880 k++;
881 }
882 __put_user(v, &target_fds[i]);
883 }
884
885 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
886
887 return 0;
888 }
889
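/*
 * The fd_set copy helpers pack and unpack the bitmap one abi_ulong at a
 * time: descriptor k lives in word k / TARGET_ABI_BITS at bit position
 * k % TARGET_ABI_BITS, matching the fd_set layout of the target ABI. On a
 * 32-bit target, for instance, fd 33 is bit 1 of target_fds[1].
 */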
890 #if defined(__alpha__)
891 #define HOST_HZ 1024
892 #else
893 #define HOST_HZ 100
894 #endif
895
896 static inline abi_long host_to_target_clock_t(long ticks)
897 {
898 #if HOST_HZ == TARGET_HZ
899 return ticks;
900 #else
901 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
902 #endif
903 }
904
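/*
 * host_to_target_clock_t() rescales clock ticks when host and target
 * disagree on HZ; e.g. 250 ticks reported by a 100 Hz host become
 * 250 * TARGET_HZ / 100 ticks for the guest.
 */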
905 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
906 const struct rusage *rusage)
907 {
908 struct target_rusage *target_rusage;
909
910 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
911 return -TARGET_EFAULT;
912 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
913 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
914 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
915 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
916 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
917 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
918 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
919 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
920 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
921 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
922 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
923 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
924 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
925 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
926 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
927 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
928 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
929 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
930 unlock_user_struct(target_rusage, target_addr, 1);
931
932 return 0;
933 }
934
935 static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
936 {
937 if (target_rlim == TARGET_RLIM_INFINITY)
938 return RLIM_INFINITY;
939 else
940 return tswapl(target_rlim);
941 }
942
943 static inline target_ulong host_to_target_rlim(rlim_t rlim)
944 {
945 if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
946 return TARGET_RLIM_INFINITY;
947 else
948 return tswapl(rlim);
949 }
950
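/*
 * Resource limits are saturated rather than truncated on the way back to
 * the guest: a host rlim_t that is RLIM_INFINITY, or too large to
 * round-trip through the target's word size, is reported as
 * TARGET_RLIM_INFINITY. A 64-bit host limit of 2^40 bytes handed to a
 * 32-bit guest thus becomes "unlimited" instead of a wrapped small value.
 */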
951 static inline abi_long copy_from_user_timeval(struct timeval *tv,
952 abi_ulong target_tv_addr)
953 {
954 struct target_timeval *target_tv;
955
956 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
957 return -TARGET_EFAULT;
958
959 __get_user(tv->tv_sec, &target_tv->tv_sec);
960 __get_user(tv->tv_usec, &target_tv->tv_usec);
961
962 unlock_user_struct(target_tv, target_tv_addr, 0);
963
964 return 0;
965 }
966
967 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
968 const struct timeval *tv)
969 {
970 struct target_timeval *target_tv;
971
972 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
973 return -TARGET_EFAULT;
974
975 __put_user(tv->tv_sec, &target_tv->tv_sec);
976 __put_user(tv->tv_usec, &target_tv->tv_usec);
977
978 unlock_user_struct(target_tv, target_tv_addr, 1);
979
980 return 0;
981 }
982
983 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
984 #include <mqueue.h>
985
986 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
987 abi_ulong target_mq_attr_addr)
988 {
989 struct target_mq_attr *target_mq_attr;
990
991 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
992 target_mq_attr_addr, 1))
993 return -TARGET_EFAULT;
994
995 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
996 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
997 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
998 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
999
1000 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1001
1002 return 0;
1003 }
1004
1005 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1006 const struct mq_attr *attr)
1007 {
1008 struct target_mq_attr *target_mq_attr;
1009
1010 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1011 target_mq_attr_addr, 0))
1012 return -TARGET_EFAULT;
1013
1014 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1015 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1016 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1017 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1018
1019 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1020
1021 return 0;
1022 }
1023 #endif
1024
1025 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1026 /* do_select() must return target values and target errnos. */
1027 static abi_long do_select(int n,
1028 abi_ulong rfd_addr, abi_ulong wfd_addr,
1029 abi_ulong efd_addr, abi_ulong target_tv_addr)
1030 {
1031 fd_set rfds, wfds, efds;
1032 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1033 struct timeval tv, *tv_ptr;
1034 abi_long ret;
1035
1036 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1037 if (ret) {
1038 return ret;
1039 }
1040 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1041 if (ret) {
1042 return ret;
1043 }
1044 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1045 if (ret) {
1046 return ret;
1047 }
1048
1049 if (target_tv_addr) {
1050 if (copy_from_user_timeval(&tv, target_tv_addr))
1051 return -TARGET_EFAULT;
1052 tv_ptr = &tv;
1053 } else {
1054 tv_ptr = NULL;
1055 }
1056
1057 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1058
1059 if (!is_error(ret)) {
1060 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1061 return -TARGET_EFAULT;
1062 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1063 return -TARGET_EFAULT;
1064 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1065 return -TARGET_EFAULT;
1066
1067 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1068 return -TARGET_EFAULT;
1069 }
1070
1071 return ret;
1072 }
1073 #endif
1074
1075 static abi_long do_pipe2(int host_pipe[], int flags)
1076 {
1077 #ifdef CONFIG_PIPE2
1078 return pipe2(host_pipe, flags);
1079 #else
1080 return -ENOSYS;
1081 #endif
1082 }
1083
1084 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1085 int flags, int is_pipe2)
1086 {
1087 int host_pipe[2];
1088 abi_long ret;
1089 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1090
1091 if (is_error(ret))
1092 return get_errno(ret);
1093
1094 /* Several targets have special calling conventions for the original
1095 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1096 if (!is_pipe2) {
1097 #if defined(TARGET_ALPHA)
1098 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1099 return host_pipe[0];
1100 #elif defined(TARGET_MIPS)
1101 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1102 return host_pipe[0];
1103 #elif defined(TARGET_SH4)
1104 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1105 return host_pipe[0];
1106 #endif
1107 }
1108
1109 if (put_user_s32(host_pipe[0], pipedes)
1110 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1111 return -TARGET_EFAULT;
1112 return get_errno(ret);
1113 }
1114
1115 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1116 abi_ulong target_addr,
1117 socklen_t len)
1118 {
1119 struct target_ip_mreqn *target_smreqn;
1120
1121 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1122 if (!target_smreqn)
1123 return -TARGET_EFAULT;
1124 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1125 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1126 if (len == sizeof(struct target_ip_mreqn))
1127 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
1128 unlock_user(target_smreqn, target_addr, 0);
1129
1130 return 0;
1131 }
1132
1133 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1134 abi_ulong target_addr,
1135 socklen_t len)
1136 {
1137 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1138 sa_family_t sa_family;
1139 struct target_sockaddr *target_saddr;
1140
1141 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1142 if (!target_saddr)
1143 return -TARGET_EFAULT;
1144
1145 sa_family = tswap16(target_saddr->sa_family);
1146
1147 /* Oops. The caller might send an incomplete sun_path; sun_path
1148 * must be terminated by \0 (see the manual page), but
1149 * unfortunately it is quite common to specify sockaddr_un
1150 * length as "strlen(x->sun_path)" while it should be
1151 * "strlen(...) + 1". We'll fix that here if needed.
1152 * The Linux kernel has a similar feature.
1153 */
1154
1155 if (sa_family == AF_UNIX) {
1156 if (len < unix_maxlen && len > 0) {
1157 char *cp = (char*)target_saddr;
1158
1159 if ( cp[len-1] && !cp[len] )
1160 len++;
1161 }
1162 if (len > unix_maxlen)
1163 len = unix_maxlen;
1164 }
1165
1166 memcpy(addr, target_saddr, len);
1167 addr->sa_family = sa_family;
1168 unlock_user(target_saddr, target_addr, 0);
1169
1170 return 0;
1171 }
1172
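/*
 * AF_UNIX length fixup, by example: a guest binding to "/tmp/sock" may pass
 * addrlen = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock"),
 * leaving the terminating '\0' just outside the reported length. The check
 * above (cp[len-1] non-zero, cp[len] zero) grows len by one byte so the
 * host kernel sees a properly terminated path, capped at
 * sizeof(struct sockaddr_un).
 */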
1173 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1174 struct sockaddr *addr,
1175 socklen_t len)
1176 {
1177 struct target_sockaddr *target_saddr;
1178
1179 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1180 if (!target_saddr)
1181 return -TARGET_EFAULT;
1182 memcpy(target_saddr, addr, len);
1183 target_saddr->sa_family = tswap16(addr->sa_family);
1184 unlock_user(target_saddr, target_addr, len);
1185
1186 return 0;
1187 }
1188
1189 /* ??? Should this also swap msgh->name? */
1190 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1191 struct target_msghdr *target_msgh)
1192 {
1193 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1194 abi_long msg_controllen;
1195 abi_ulong target_cmsg_addr;
1196 struct target_cmsghdr *target_cmsg;
1197 socklen_t space = 0;
1198
1199 msg_controllen = tswapl(target_msgh->msg_controllen);
1200 if (msg_controllen < sizeof (struct target_cmsghdr))
1201 goto the_end;
1202 target_cmsg_addr = tswapl(target_msgh->msg_control);
1203 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1204 if (!target_cmsg)
1205 return -TARGET_EFAULT;
1206
1207 while (cmsg && target_cmsg) {
1208 void *data = CMSG_DATA(cmsg);
1209 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1210
1211 int len = tswapl(target_cmsg->cmsg_len)
1212 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1213
1214 space += CMSG_SPACE(len);
1215 if (space > msgh->msg_controllen) {
1216 space -= CMSG_SPACE(len);
1217 gemu_log("Host cmsg overflow\n");
1218 break;
1219 }
1220
1221 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1222 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1223 cmsg->cmsg_len = CMSG_LEN(len);
1224
1225 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1226 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1227 memcpy(data, target_data, len);
1228 } else {
1229 int *fd = (int *)data;
1230 int *target_fd = (int *)target_data;
1231 int i, numfds = len / sizeof(int);
1232
1233 for (i = 0; i < numfds; i++)
1234 fd[i] = tswap32(target_fd[i]);
1235 }
1236
1237 cmsg = CMSG_NXTHDR(msgh, cmsg);
1238 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1239 }
1240 unlock_user(target_cmsg, target_cmsg_addr, 0);
1241 the_end:
1242 msgh->msg_controllen = space;
1243 return 0;
1244 }
1245
1246 /* ??? Should this also swap msgh->name? */
1247 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1248 struct msghdr *msgh)
1249 {
1250 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1251 abi_long msg_controllen;
1252 abi_ulong target_cmsg_addr;
1253 struct target_cmsghdr *target_cmsg;
1254 socklen_t space = 0;
1255
1256 msg_controllen = tswapl(target_msgh->msg_controllen);
1257 if (msg_controllen < sizeof (struct target_cmsghdr))
1258 goto the_end;
1259 target_cmsg_addr = tswapl(target_msgh->msg_control);
1260 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1261 if (!target_cmsg)
1262 return -TARGET_EFAULT;
1263
1264 while (cmsg && target_cmsg) {
1265 void *data = CMSG_DATA(cmsg);
1266 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1267
1268 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1269
1270 space += TARGET_CMSG_SPACE(len);
1271 if (space > msg_controllen) {
1272 space -= TARGET_CMSG_SPACE(len);
1273 gemu_log("Target cmsg overflow\n");
1274 break;
1275 }
1276
1277 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1278 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1279 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1280
1281 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1282 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1283 memcpy(target_data, data, len);
1284 } else {
1285 int *fd = (int *)data;
1286 int *target_fd = (int *)target_data;
1287 int i, numfds = len / sizeof(int);
1288
1289 for (i = 0; i < numfds; i++)
1290 target_fd[i] = tswap32(fd[i]);
1291 }
1292
1293 cmsg = CMSG_NXTHDR(msgh, cmsg);
1294 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1295 }
1296 unlock_user(target_cmsg, target_cmsg_addr, space);
1297 the_end:
1298 target_msgh->msg_controllen = tswapl(space);
1299 return 0;
1300 }
1301
1302 /* do_setsockopt() Must return target values and target errnos. */
1303 static abi_long do_setsockopt(int sockfd, int level, int optname,
1304 abi_ulong optval_addr, socklen_t optlen)
1305 {
1306 abi_long ret;
1307 int val;
1308 struct ip_mreqn *ip_mreq;
1309 struct ip_mreq_source *ip_mreq_source;
1310
1311 switch(level) {
1312 case SOL_TCP:
1313 /* TCP options all take an 'int' value. */
1314 if (optlen < sizeof(uint32_t))
1315 return -TARGET_EINVAL;
1316
1317 if (get_user_u32(val, optval_addr))
1318 return -TARGET_EFAULT;
1319 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1320 break;
1321 case SOL_IP:
1322 switch(optname) {
1323 case IP_TOS:
1324 case IP_TTL:
1325 case IP_HDRINCL:
1326 case IP_ROUTER_ALERT:
1327 case IP_RECVOPTS:
1328 case IP_RETOPTS:
1329 case IP_PKTINFO:
1330 case IP_MTU_DISCOVER:
1331 case IP_RECVERR:
1332 case IP_RECVTOS:
1333 #ifdef IP_FREEBIND
1334 case IP_FREEBIND:
1335 #endif
1336 case IP_MULTICAST_TTL:
1337 case IP_MULTICAST_LOOP:
1338 val = 0;
1339 if (optlen >= sizeof(uint32_t)) {
1340 if (get_user_u32(val, optval_addr))
1341 return -TARGET_EFAULT;
1342 } else if (optlen >= 1) {
1343 if (get_user_u8(val, optval_addr))
1344 return -TARGET_EFAULT;
1345 }
1346 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1347 break;
1348 case IP_ADD_MEMBERSHIP:
1349 case IP_DROP_MEMBERSHIP:
1350 if (optlen < sizeof (struct target_ip_mreq) ||
1351 optlen > sizeof (struct target_ip_mreqn))
1352 return -TARGET_EINVAL;
1353
1354 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1355 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1356 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1357 break;
1358
1359 case IP_BLOCK_SOURCE:
1360 case IP_UNBLOCK_SOURCE:
1361 case IP_ADD_SOURCE_MEMBERSHIP:
1362 case IP_DROP_SOURCE_MEMBERSHIP:
1363 if (optlen != sizeof (struct target_ip_mreq_source))
1364 return -TARGET_EINVAL;
1365
1366 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1367 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1368 unlock_user (ip_mreq_source, optval_addr, 0);
1369 break;
1370
1371 default:
1372 goto unimplemented;
1373 }
1374 break;
1375 case TARGET_SOL_SOCKET:
1376 switch (optname) {
1377 /* Options with 'int' argument. */
1378 case TARGET_SO_DEBUG:
1379 optname = SO_DEBUG;
1380 break;
1381 case TARGET_SO_REUSEADDR:
1382 optname = SO_REUSEADDR;
1383 break;
1384 case TARGET_SO_TYPE:
1385 optname = SO_TYPE;
1386 break;
1387 case TARGET_SO_ERROR:
1388 optname = SO_ERROR;
1389 break;
1390 case TARGET_SO_DONTROUTE:
1391 optname = SO_DONTROUTE;
1392 break;
1393 case TARGET_SO_BROADCAST:
1394 optname = SO_BROADCAST;
1395 break;
1396 case TARGET_SO_SNDBUF:
1397 optname = SO_SNDBUF;
1398 break;
1399 case TARGET_SO_RCVBUF:
1400 optname = SO_RCVBUF;
1401 break;
1402 case TARGET_SO_KEEPALIVE:
1403 optname = SO_KEEPALIVE;
1404 break;
1405 case TARGET_SO_OOBINLINE:
1406 optname = SO_OOBINLINE;
1407 break;
1408 case TARGET_SO_NO_CHECK:
1409 optname = SO_NO_CHECK;
1410 break;
1411 case TARGET_SO_PRIORITY:
1412 optname = SO_PRIORITY;
1413 break;
1414 #ifdef SO_BSDCOMPAT
1415 case TARGET_SO_BSDCOMPAT:
1416 optname = SO_BSDCOMPAT;
1417 break;
1418 #endif
1419 case TARGET_SO_PASSCRED:
1420 optname = SO_PASSCRED;
1421 break;
1422 case TARGET_SO_TIMESTAMP:
1423 optname = SO_TIMESTAMP;
1424 break;
1425 case TARGET_SO_RCVLOWAT:
1426 optname = SO_RCVLOWAT;
1427 break;
1428 case TARGET_SO_RCVTIMEO:
1429 optname = SO_RCVTIMEO;
1430 break;
1431 case TARGET_SO_SNDTIMEO:
1432 optname = SO_SNDTIMEO;
1433 break;
1435 default:
1436 goto unimplemented;
1437 }
1438 if (optlen < sizeof(uint32_t))
1439 return -TARGET_EINVAL;
1440
1441 if (get_user_u32(val, optval_addr))
1442 return -TARGET_EFAULT;
1443 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1444 break;
1445 default:
1446 unimplemented:
1447 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1448 ret = -TARGET_ENOPROTOOPT;
1449 }
1450 return ret;
1451 }
1452
1453 /* do_getsockopt() Must return target values and target errnos. */
1454 static abi_long do_getsockopt(int sockfd, int level, int optname,
1455 abi_ulong optval_addr, abi_ulong optlen)
1456 {
1457 abi_long ret;
1458 int len, val;
1459 socklen_t lv;
1460
1461 switch(level) {
1462 case TARGET_SOL_SOCKET:
1463 level = SOL_SOCKET;
1464 switch (optname) {
1465 /* These don't just return a single integer */
1466 case TARGET_SO_LINGER:
1467 case TARGET_SO_RCVTIMEO:
1468 case TARGET_SO_SNDTIMEO:
1469 case TARGET_SO_PEERCRED:
1470 case TARGET_SO_PEERNAME:
1471 goto unimplemented;
1472 /* Options with 'int' argument. */
1473 case TARGET_SO_DEBUG:
1474 optname = SO_DEBUG;
1475 goto int_case;
1476 case TARGET_SO_REUSEADDR:
1477 optname = SO_REUSEADDR;
1478 goto int_case;
1479 case TARGET_SO_TYPE:
1480 optname = SO_TYPE;
1481 goto int_case;
1482 case TARGET_SO_ERROR:
1483 optname = SO_ERROR;
1484 goto int_case;
1485 case TARGET_SO_DONTROUTE:
1486 optname = SO_DONTROUTE;
1487 goto int_case;
1488 case TARGET_SO_BROADCAST:
1489 optname = SO_BROADCAST;
1490 goto int_case;
1491 case TARGET_SO_SNDBUF:
1492 optname = SO_SNDBUF;
1493 goto int_case;
1494 case TARGET_SO_RCVBUF:
1495 optname = SO_RCVBUF;
1496 goto int_case;
1497 case TARGET_SO_KEEPALIVE:
1498 optname = SO_KEEPALIVE;
1499 goto int_case;
1500 case TARGET_SO_OOBINLINE:
1501 optname = SO_OOBINLINE;
1502 goto int_case;
1503 case TARGET_SO_NO_CHECK:
1504 optname = SO_NO_CHECK;
1505 goto int_case;
1506 case TARGET_SO_PRIORITY:
1507 optname = SO_PRIORITY;
1508 goto int_case;
1509 #ifdef SO_BSDCOMPAT
1510 case TARGET_SO_BSDCOMPAT:
1511 optname = SO_BSDCOMPAT;
1512 goto int_case;
1513 #endif
1514 case TARGET_SO_PASSCRED:
1515 optname = SO_PASSCRED;
1516 goto int_case;
1517 case TARGET_SO_TIMESTAMP:
1518 optname = SO_TIMESTAMP;
1519 goto int_case;
1520 case TARGET_SO_RCVLOWAT:
1521 optname = SO_RCVLOWAT;
1522 goto int_case;
1523 default:
1524 goto int_case;
1525 }
1526 break;
1527 case SOL_TCP:
1528 /* TCP options all take an 'int' value. */
1529 int_case:
1530 if (get_user_u32(len, optlen))
1531 return -TARGET_EFAULT;
1532 if (len < 0)
1533 return -TARGET_EINVAL;
1534 lv = sizeof(lv);
1535 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1536 if (ret < 0)
1537 return ret;
1538 if (len > lv)
1539 len = lv;
1540 if (len == 4) {
1541 if (put_user_u32(val, optval_addr))
1542 return -TARGET_EFAULT;
1543 } else {
1544 if (put_user_u8(val, optval_addr))
1545 return -TARGET_EFAULT;
1546 }
1547 if (put_user_u32(len, optlen))
1548 return -TARGET_EFAULT;
1549 break;
1550 case SOL_IP:
1551 switch(optname) {
1552 case IP_TOS:
1553 case IP_TTL:
1554 case IP_HDRINCL:
1555 case IP_ROUTER_ALERT:
1556 case IP_RECVOPTS:
1557 case IP_RETOPTS:
1558 case IP_PKTINFO:
1559 case IP_MTU_DISCOVER:
1560 case IP_RECVERR:
1561 case IP_RECVTOS:
1562 #ifdef IP_FREEBIND
1563 case IP_FREEBIND:
1564 #endif
1565 case IP_MULTICAST_TTL:
1566 case IP_MULTICAST_LOOP:
1567 if (get_user_u32(len, optlen))
1568 return -TARGET_EFAULT;
1569 if (len < 0)
1570 return -TARGET_EINVAL;
1571 lv = sizeof(lv);
1572 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1573 if (ret < 0)
1574 return ret;
1575 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1576 len = 1;
1577 if (put_user_u32(len, optlen)
1578 || put_user_u8(val, optval_addr))
1579 return -TARGET_EFAULT;
1580 } else {
1581 if (len > sizeof(int))
1582 len = sizeof(int);
1583 if (put_user_u32(len, optlen)
1584 || put_user_u32(val, optval_addr))
1585 return -TARGET_EFAULT;
1586 }
1587 break;
1588 default:
1589 ret = -TARGET_ENOPROTOOPT;
1590 break;
1591 }
1592 break;
1593 default:
1594 unimplemented:
1595 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1596 level, optname);
1597 ret = -TARGET_EOPNOTSUPP;
1598 break;
1599 }
1600 return ret;
1601 }
1602
1603 /* FIXME
1604 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1605 * other lock functions have a return code of 0 for failure.
1606 */
1607 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1608 int count, int copy)
1609 {
1610 struct target_iovec *target_vec;
1611 abi_ulong base;
1612 int i;
1613
1614 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1615 if (!target_vec)
1616 return -TARGET_EFAULT;
1617 for(i = 0;i < count; i++) {
1618 base = tswapl(target_vec[i].iov_base);
1619 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1620 if (vec[i].iov_len != 0) {
1621 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1622 /* Don't check lock_user return value. We must call writev even
1623 if an element has an invalid base address. */
1624 } else {
1625 /* zero length pointer is ignored */
1626 vec[i].iov_base = NULL;
1627 }
1628 }
1629 unlock_user (target_vec, target_addr, 0);
1630 return 0;
1631 }
1632
1633 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1634 int count, int copy)
1635 {
1636 struct target_iovec *target_vec;
1637 abi_ulong base;
1638 int i;
1639
1640 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1641 if (!target_vec)
1642 return -TARGET_EFAULT;
1643 for(i = 0;i < count; i++) {
1644 if (target_vec[i].iov_base) {
1645 base = tswapl(target_vec[i].iov_base);
1646 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1647 }
1648 }
1649 unlock_user (target_vec, target_addr, 0);
1650
1651 return 0;
1652 }
1653
1654 /* do_socket() Must return target values and target errnos. */
1655 static abi_long do_socket(int domain, int type, int protocol)
1656 {
1657 #if defined(TARGET_MIPS)
1658 switch(type) {
1659 case TARGET_SOCK_DGRAM:
1660 type = SOCK_DGRAM;
1661 break;
1662 case TARGET_SOCK_STREAM:
1663 type = SOCK_STREAM;
1664 break;
1665 case TARGET_SOCK_RAW:
1666 type = SOCK_RAW;
1667 break;
1668 case TARGET_SOCK_RDM:
1669 type = SOCK_RDM;
1670 break;
1671 case TARGET_SOCK_SEQPACKET:
1672 type = SOCK_SEQPACKET;
1673 break;
1674 case TARGET_SOCK_PACKET:
1675 type = SOCK_PACKET;
1676 break;
1677 }
1678 #endif
1679 if (domain == PF_NETLINK)
1680 return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
1681 return get_errno(socket(domain, type, protocol));
1682 }
1683
1684 /* do_bind() Must return target values and target errnos. */
1685 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1686 socklen_t addrlen)
1687 {
1688 void *addr;
1689 abi_long ret;
1690
1691 if ((int)addrlen < 0) {
1692 return -TARGET_EINVAL;
1693 }
1694
1695 addr = alloca(addrlen+1);
1696
1697 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1698 if (ret)
1699 return ret;
1700
1701 return get_errno(bind(sockfd, addr, addrlen));
1702 }
1703
1704 /* do_connect() Must return target values and target errnos. */
1705 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1706 socklen_t addrlen)
1707 {
1708 void *addr;
1709 abi_long ret;
1710
1711 if ((int)addrlen < 0) {
1712 return -TARGET_EINVAL;
1713 }
1714
1715 addr = alloca(addrlen);
1716
1717 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1718 if (ret)
1719 return ret;
1720
1721 return get_errno(connect(sockfd, addr, addrlen));
1722 }
1723
1724 /* do_sendrecvmsg() Must return target values and target errnos. */
1725 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1726 int flags, int send)
1727 {
1728 abi_long ret, len;
1729 struct target_msghdr *msgp;
1730 struct msghdr msg;
1731 int count;
1732 struct iovec *vec;
1733 abi_ulong target_vec;
1734
1735 /* FIXME */
1736 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1737 msgp,
1738 target_msg,
1739 send ? 1 : 0))
1740 return -TARGET_EFAULT;
1741 if (msgp->msg_name) {
1742 msg.msg_namelen = tswap32(msgp->msg_namelen);
1743 msg.msg_name = alloca(msg.msg_namelen);
1744 ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1745 msg.msg_namelen);
1746 if (ret) {
1747 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1748 return ret;
1749 }
1750 } else {
1751 msg.msg_name = NULL;
1752 msg.msg_namelen = 0;
1753 }
1754 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1755 msg.msg_control = alloca(msg.msg_controllen);
1756 msg.msg_flags = tswap32(msgp->msg_flags);
1757
1758 count = tswapl(msgp->msg_iovlen);
1759 vec = alloca(count * sizeof(struct iovec));
1760 target_vec = tswapl(msgp->msg_iov);
1761 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1762 msg.msg_iovlen = count;
1763 msg.msg_iov = vec;
1764
1765 if (send) {
1766 ret = target_to_host_cmsg(&msg, msgp);
1767 if (ret == 0)
1768 ret = get_errno(sendmsg(fd, &msg, flags));
1769 } else {
1770 ret = get_errno(recvmsg(fd, &msg, flags));
1771 if (!is_error(ret)) {
1772 len = ret;
1773 ret = host_to_target_cmsg(msgp, &msg);
1774 if (!is_error(ret))
1775 ret = len;
1776 }
1777 }
1778 unlock_iovec(vec, target_vec, count, !send);
1779 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1780 return ret;
1781 }
1782
1783 /* do_accept() Must return target values and target errnos. */
1784 static abi_long do_accept(int fd, abi_ulong target_addr,
1785 abi_ulong target_addrlen_addr)
1786 {
1787 socklen_t addrlen;
1788 void *addr;
1789 abi_long ret;
1790
1791 if (target_addr == 0)
1792 return get_errno(accept(fd, NULL, NULL));
1793
1794 /* Linux returns EINVAL if the addrlen pointer is invalid */
1795 if (get_user_u32(addrlen, target_addrlen_addr))
1796 return -TARGET_EINVAL;
1797
1798 if ((int)addrlen < 0) {
1799 return -TARGET_EINVAL;
1800 }
1801
1802 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1803 return -TARGET_EINVAL;
1804
1805 addr = alloca(addrlen);
1806
1807 ret = get_errno(accept(fd, addr, &addrlen));
1808 if (!is_error(ret)) {
1809 host_to_target_sockaddr(target_addr, addr, addrlen);
1810 if (put_user_u32(addrlen, target_addrlen_addr))
1811 ret = -TARGET_EFAULT;
1812 }
1813 return ret;
1814 }
1815
1816 /* do_getpeername() Must return target values and target errnos. */
1817 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1818 abi_ulong target_addrlen_addr)
1819 {
1820 socklen_t addrlen;
1821 void *addr;
1822 abi_long ret;
1823
1824 if (get_user_u32(addrlen, target_addrlen_addr))
1825 return -TARGET_EFAULT;
1826
1827 if ((int)addrlen < 0) {
1828 return -TARGET_EINVAL;
1829 }
1830
1831 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1832 return -TARGET_EFAULT;
1833
1834 addr = alloca(addrlen);
1835
1836 ret = get_errno(getpeername(fd, addr, &addrlen));
1837 if (!is_error(ret)) {
1838 host_to_target_sockaddr(target_addr, addr, addrlen);
1839 if (put_user_u32(addrlen, target_addrlen_addr))
1840 ret = -TARGET_EFAULT;
1841 }
1842 return ret;
1843 }
1844
1845 /* do_getsockname() Must return target values and target errnos. */
1846 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1847 abi_ulong target_addrlen_addr)
1848 {
1849 socklen_t addrlen;
1850 void *addr;
1851 abi_long ret;
1852
1853 if (get_user_u32(addrlen, target_addrlen_addr))
1854 return -TARGET_EFAULT;
1855
1856 if ((int)addrlen < 0) {
1857 return -TARGET_EINVAL;
1858 }
1859
1860 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1861 return -TARGET_EFAULT;
1862
1863 addr = alloca(addrlen);
1864
1865 ret = get_errno(getsockname(fd, addr, &addrlen));
1866 if (!is_error(ret)) {
1867 host_to_target_sockaddr(target_addr, addr, addrlen);
1868 if (put_user_u32(addrlen, target_addrlen_addr))
1869 ret = -TARGET_EFAULT;
1870 }
1871 return ret;
1872 }
1873
1874 /* do_socketpair() Must return target values and target errnos. */
1875 static abi_long do_socketpair(int domain, int type, int protocol,
1876 abi_ulong target_tab_addr)
1877 {
1878 int tab[2];
1879 abi_long ret;
1880
1881 ret = get_errno(socketpair(domain, type, protocol, tab));
1882 if (!is_error(ret)) {
1883 if (put_user_s32(tab[0], target_tab_addr)
1884 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1885 ret = -TARGET_EFAULT;
1886 }
1887 return ret;
1888 }
1889
1890 /* do_sendto() Must return target values and target errnos. */
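/* When target_addr is 0 (the SOCKOP_send path) this falls back to plain
 * send() instead of sendto(); otherwise the target sockaddr is converted
 * to the host layout first.
 */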
1891 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1892 abi_ulong target_addr, socklen_t addrlen)
1893 {
1894 void *addr;
1895 void *host_msg;
1896 abi_long ret;
1897
1898 if ((int)addrlen < 0) {
1899 return -TARGET_EINVAL;
1900 }
1901
1902 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1903 if (!host_msg)
1904 return -TARGET_EFAULT;
1905 if (target_addr) {
1906 addr = alloca(addrlen);
1907 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1908 if (ret) {
1909 unlock_user(host_msg, msg, 0);
1910 return ret;
1911 }
1912 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1913 } else {
1914 ret = get_errno(send(fd, host_msg, len, flags));
1915 }
1916 unlock_user(host_msg, msg, 0);
1917 return ret;
1918 }
1919
1920 /* do_recvfrom() Must return target values and target errnos. */
1921 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1922 abi_ulong target_addr,
1923 abi_ulong target_addrlen)
1924 {
1925 socklen_t addrlen;
1926 void *addr;
1927 void *host_msg;
1928 abi_long ret;
1929
1930 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1931 if (!host_msg)
1932 return -TARGET_EFAULT;
1933 if (target_addr) {
1934 if (get_user_u32(addrlen, target_addrlen)) {
1935 ret = -TARGET_EFAULT;
1936 goto fail;
1937 }
1938 if ((int)addrlen < 0) {
1939 ret = -TARGET_EINVAL;
1940 goto fail;
1941 }
1942 addr = alloca(addrlen);
1943 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1944 } else {
1945 addr = NULL; /* To keep compiler quiet. */
1946 ret = get_errno(recv(fd, host_msg, len, flags));
1947 }
1948 if (!is_error(ret)) {
1949 if (target_addr) {
1950 host_to_target_sockaddr(target_addr, addr, addrlen);
1951 if (put_user_u32(addrlen, target_addrlen)) {
1952 ret = -TARGET_EFAULT;
1953 goto fail;
1954 }
1955 }
1956 unlock_user(host_msg, msg, len);
1957 } else {
1958 fail:
1959 unlock_user(host_msg, msg, 0);
1960 }
1961 return ret;
1962 }
1963
1964 #ifdef TARGET_NR_socketcall
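/* The socketcall arguments live in guest memory as an array of abi_ulong
 * values at vptr; each case below reads them with get_user_ual() and then
 * dispatches to the matching do_*() helper, which performs any remaining
 * pointer and sockaddr conversion.
 */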
1965 /* do_socketcall() Must return target values and target errnos. */
1966 static abi_long do_socketcall(int num, abi_ulong vptr)
1967 {
1968 abi_long ret;
1969 const int n = sizeof(abi_ulong);
1970
1971 switch(num) {
1972 case SOCKOP_socket:
1973 {
1974 abi_ulong domain, type, protocol;
1975
1976 if (get_user_ual(domain, vptr)
1977 || get_user_ual(type, vptr + n)
1978 || get_user_ual(protocol, vptr + 2 * n))
1979 return -TARGET_EFAULT;
1980
1981 ret = do_socket(domain, type, protocol);
1982 }
1983 break;
1984 case SOCKOP_bind:
1985 {
1986 abi_ulong sockfd;
1987 abi_ulong target_addr;
1988 socklen_t addrlen;
1989
1990 if (get_user_ual(sockfd, vptr)
1991 || get_user_ual(target_addr, vptr + n)
1992 || get_user_ual(addrlen, vptr + 2 * n))
1993 return -TARGET_EFAULT;
1994
1995 ret = do_bind(sockfd, target_addr, addrlen);
1996 }
1997 break;
1998 case SOCKOP_connect:
1999 {
2000 abi_ulong sockfd;
2001 abi_ulong target_addr;
2002 socklen_t addrlen;
2003
2004 if (get_user_ual(sockfd, vptr)
2005 || get_user_ual(target_addr, vptr + n)
2006 || get_user_ual(addrlen, vptr + 2 * n))
2007 return -TARGET_EFAULT;
2008
2009 ret = do_connect(sockfd, target_addr, addrlen);
2010 }
2011 break;
2012 case SOCKOP_listen:
2013 {
2014 abi_ulong sockfd, backlog;
2015
2016 if (get_user_ual(sockfd, vptr)
2017 || get_user_ual(backlog, vptr + n))
2018 return -TARGET_EFAULT;
2019
2020 ret = get_errno(listen(sockfd, backlog));
2021 }
2022 break;
2023 case SOCKOP_accept:
2024 {
2025 abi_ulong sockfd;
2026 abi_ulong target_addr, target_addrlen;
2027
2028 if (get_user_ual(sockfd, vptr)
2029 || get_user_ual(target_addr, vptr + n)
2030 || get_user_ual(target_addrlen, vptr + 2 * n))
2031 return -TARGET_EFAULT;
2032
2033 ret = do_accept(sockfd, target_addr, target_addrlen);
2034 }
2035 break;
2036 case SOCKOP_getsockname:
2037 {
2038 abi_ulong sockfd;
2039 abi_ulong target_addr, target_addrlen;
2040
2041 if (get_user_ual(sockfd, vptr)
2042 || get_user_ual(target_addr, vptr + n)
2043 || get_user_ual(target_addrlen, vptr + 2 * n))
2044 return -TARGET_EFAULT;
2045
2046 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2047 }
2048 break;
2049 case SOCKOP_getpeername:
2050 {
2051 abi_ulong sockfd;
2052 abi_ulong target_addr, target_addrlen;
2053
2054 if (get_user_ual(sockfd, vptr)
2055 || get_user_ual(target_addr, vptr + n)
2056 || get_user_ual(target_addrlen, vptr + 2 * n))
2057 return -TARGET_EFAULT;
2058
2059 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2060 }
2061 break;
2062 case SOCKOP_socketpair:
2063 {
2064 abi_ulong domain, type, protocol;
2065 abi_ulong tab;
2066
2067 if (get_user_ual(domain, vptr)
2068 || get_user_ual(type, vptr + n)
2069 || get_user_ual(protocol, vptr + 2 * n)
2070 || get_user_ual(tab, vptr + 3 * n))
2071 return -TARGET_EFAULT;
2072
2073 ret = do_socketpair(domain, type, protocol, tab);
2074 }
2075 break;
2076 case SOCKOP_send:
2077 {
2078 abi_ulong sockfd;
2079 abi_ulong msg;
2080 size_t len;
2081 abi_ulong flags;
2082
2083 if (get_user_ual(sockfd, vptr)
2084 || get_user_ual(msg, vptr + n)
2085 || get_user_ual(len, vptr + 2 * n)
2086 || get_user_ual(flags, vptr + 3 * n))
2087 return -TARGET_EFAULT;
2088
2089 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2090 }
2091 break;
2092 case SOCKOP_recv:
2093 {
2094 abi_ulong sockfd;
2095 abi_ulong msg;
2096 size_t len;
2097 abi_ulong flags;
2098
2099 if (get_user_ual(sockfd, vptr)
2100 || get_user_ual(msg, vptr + n)
2101 || get_user_ual(len, vptr + 2 * n)
2102 || get_user_ual(flags, vptr + 3 * n))
2103 return -TARGET_EFAULT;
2104
2105 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2106 }
2107 break;
2108 case SOCKOP_sendto:
2109 {
2110 abi_ulong sockfd;
2111 abi_ulong msg;
2112 size_t len;
2113 abi_ulong flags;
2114 abi_ulong addr;
2115 socklen_t addrlen;
2116
2117 if (get_user_ual(sockfd, vptr)
2118 || get_user_ual(msg, vptr + n)
2119 || get_user_ual(len, vptr + 2 * n)
2120 || get_user_ual(flags, vptr + 3 * n)
2121 || get_user_ual(addr, vptr + 4 * n)
2122 || get_user_ual(addrlen, vptr + 5 * n))
2123 return -TARGET_EFAULT;
2124
2125 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2126 }
2127 break;
2128 case SOCKOP_recvfrom:
2129 {
2130 abi_ulong sockfd;
2131 abi_ulong msg;
2132 size_t len;
2133 abi_ulong flags;
2134 abi_ulong addr;
2135 socklen_t addrlen;
2136
2137 if (get_user_ual(sockfd, vptr)
2138 || get_user_ual(msg, vptr + n)
2139 || get_user_ual(len, vptr + 2 * n)
2140 || get_user_ual(flags, vptr + 3 * n)
2141 || get_user_ual(addr, vptr + 4 * n)
2142 || get_user_ual(addrlen, vptr + 5 * n))
2143 return -TARGET_EFAULT;
2144
2145 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2146 }
2147 break;
2148 case SOCKOP_shutdown:
2149 {
2150 abi_ulong sockfd, how;
2151
2152 if (get_user_ual(sockfd, vptr)
2153 || get_user_ual(how, vptr + n))
2154 return -TARGET_EFAULT;
2155
2156 ret = get_errno(shutdown(sockfd, how));
2157 }
2158 break;
2159 case SOCKOP_sendmsg:
2160 case SOCKOP_recvmsg:
2161 {
2162 abi_ulong fd;
2163 abi_ulong target_msg;
2164 abi_ulong flags;
2165
2166 if (get_user_ual(fd, vptr)
2167 || get_user_ual(target_msg, vptr + n)
2168 || get_user_ual(flags, vptr + 2 * n))
2169 return -TARGET_EFAULT;
2170
2171 ret = do_sendrecvmsg(fd, target_msg, flags,
2172 (num == SOCKOP_sendmsg));
2173 }
2174 break;
2175 case SOCKOP_setsockopt:
2176 {
2177 abi_ulong sockfd;
2178 abi_ulong level;
2179 abi_ulong optname;
2180 abi_ulong optval;
2181 socklen_t optlen;
2182
2183 if (get_user_ual(sockfd, vptr)
2184 || get_user_ual(level, vptr + n)
2185 || get_user_ual(optname, vptr + 2 * n)
2186 || get_user_ual(optval, vptr + 3 * n)
2187 || get_user_ual(optlen, vptr + 4 * n))
2188 return -TARGET_EFAULT;
2189
2190 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2191 }
2192 break;
2193 case SOCKOP_getsockopt:
2194 {
2195 abi_ulong sockfd;
2196 abi_ulong level;
2197 abi_ulong optname;
2198 abi_ulong optval;
2199 socklen_t optlen;
2200
2201 if (get_user_ual(sockfd, vptr)
2202 || get_user_ual(level, vptr + n)
2203 || get_user_ual(optname, vptr + 2 * n)
2204 || get_user_ual(optval, vptr + 3 * n)
2205 || get_user_ual(optlen, vptr + 4 * n))
2206 return -TARGET_EFAULT;
2207
2208 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2209 }
2210 break;
2211 default:
2212 gemu_log("Unsupported socketcall: %d\n", num);
2213 ret = -TARGET_ENOSYS;
2214 break;
2215 }
2216 return ret;
2217 }
2218 #endif
2219
2220 #define N_SHM_REGIONS 32
2221
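/* Bookkeeping for guest shmat() attachments: do_shmat() records the guest
 * address and segment size here so that do_shmdt() can later clear the page
 * flags for the whole region.
 */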
2222 static struct shm_region {
2223 abi_ulong start;
2224 abi_ulong size;
2225 } shm_regions[N_SHM_REGIONS];
2226
2227 struct target_ipc_perm
2228 {
2229 abi_long __key;
2230 abi_ulong uid;
2231 abi_ulong gid;
2232 abi_ulong cuid;
2233 abi_ulong cgid;
2234 unsigned short int mode;
2235 unsigned short int __pad1;
2236 unsigned short int __seq;
2237 unsigned short int __pad2;
2238 abi_ulong __unused1;
2239 abi_ulong __unused2;
2240 };
2241
2242 struct target_semid_ds
2243 {
2244 struct target_ipc_perm sem_perm;
2245 abi_ulong sem_otime;
2246 abi_ulong __unused1;
2247 abi_ulong sem_ctime;
2248 abi_ulong __unused2;
2249 abi_ulong sem_nsems;
2250 abi_ulong __unused3;
2251 abi_ulong __unused4;
2252 };
2253
2254 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2255 abi_ulong target_addr)
2256 {
2257 struct target_ipc_perm *target_ip;
2258 struct target_semid_ds *target_sd;
2259
2260 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2261 return -TARGET_EFAULT;
2262 target_ip = &(target_sd->sem_perm);
2263 host_ip->__key = tswapl(target_ip->__key);
2264 host_ip->uid = tswapl(target_ip->uid);
2265 host_ip->gid = tswapl(target_ip->gid);
2266 host_ip->cuid = tswapl(target_ip->cuid);
2267 host_ip->cgid = tswapl(target_ip->cgid);
2268 host_ip->mode = tswapl(target_ip->mode);
2269 unlock_user_struct(target_sd, target_addr, 0);
2270 return 0;
2271 }
2272
2273 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2274 struct ipc_perm *host_ip)
2275 {
2276 struct target_ipc_perm *target_ip;
2277 struct target_semid_ds *target_sd;
2278
2279 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2280 return -TARGET_EFAULT;
2281 target_ip = &(target_sd->sem_perm);
2282 target_ip->__key = tswapl(host_ip->__key);
2283 target_ip->uid = tswapl(host_ip->uid);
2284 target_ip->gid = tswapl(host_ip->gid);
2285 target_ip->cuid = tswapl(host_ip->cuid);
2286 target_ip->cgid = tswapl(host_ip->cgid);
2287 target_ip->mode = tswapl(host_ip->mode);
2288 unlock_user_struct(target_sd, target_addr, 1);
2289 return 0;
2290 }
2291
2292 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2293 abi_ulong target_addr)
2294 {
2295 struct target_semid_ds *target_sd;
2296
2297 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2298 return -TARGET_EFAULT;
2299 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2300 return -TARGET_EFAULT;
2301 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2302 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2303 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2304 unlock_user_struct(target_sd, target_addr, 0);
2305 return 0;
2306 }
2307
2308 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2309 struct semid_ds *host_sd)
2310 {
2311 struct target_semid_ds *target_sd;
2312
2313 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2314 return -TARGET_EFAULT;
2315 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2316         return -TARGET_EFAULT;
2317 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2318 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2319 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2320 unlock_user_struct(target_sd, target_addr, 1);
2321 return 0;
2322 }
2323
2324 struct target_seminfo {
2325 int semmap;
2326 int semmni;
2327 int semmns;
2328 int semmnu;
2329 int semmsl;
2330 int semopm;
2331 int semume;
2332 int semusz;
2333 int semvmx;
2334 int semaem;
2335 };
2336
2337 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2338 struct seminfo *host_seminfo)
2339 {
2340 struct target_seminfo *target_seminfo;
2341 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2342 return -TARGET_EFAULT;
2343 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2344 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2345 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2346 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2347 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2348 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2349 __put_user(host_seminfo->semume, &target_seminfo->semume);
2350 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2351 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2352 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2353 unlock_user_struct(target_seminfo, target_addr, 1);
2354 return 0;
2355 }
2356
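/* semctl() requires the caller to define union semun itself (see semctl(2));
 * target_semun is the guest-side view, where the pointer members are
 * abi_ulong guest addresses.
 */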
2357 union semun {
2358 int val;
2359 struct semid_ds *buf;
2360 unsigned short *array;
2361 struct seminfo *__buf;
2362 };
2363
2364 union target_semun {
2365 int val;
2366 abi_ulong buf;
2367 abi_ulong array;
2368 abi_ulong __buf;
2369 };
2370
2371 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2372 abi_ulong target_addr)
2373 {
2374 int nsems;
2375 unsigned short *array;
2376 union semun semun;
2377 struct semid_ds semid_ds;
2378 int i, ret;
2379
2380 semun.buf = &semid_ds;
2381
2382 ret = semctl(semid, 0, IPC_STAT, semun);
2383 if (ret == -1)
2384 return get_errno(ret);
2385
2386 nsems = semid_ds.sem_nsems;
2387
2388 *host_array = malloc(nsems*sizeof(unsigned short));
2389 array = lock_user(VERIFY_READ, target_addr,
2390 nsems*sizeof(unsigned short), 1);
2391 if (!array)
2392 return -TARGET_EFAULT;
2393
2394 for(i=0; i<nsems; i++) {
2395 __get_user((*host_array)[i], &array[i]);
2396 }
2397 unlock_user(array, target_addr, 0);
2398
2399 return 0;
2400 }
2401
2402 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2403 unsigned short **host_array)
2404 {
2405 int nsems;
2406 unsigned short *array;
2407 union semun semun;
2408 struct semid_ds semid_ds;
2409 int i, ret;
2410
2411 semun.buf = &semid_ds;
2412
2413 ret = semctl(semid, 0, IPC_STAT, semun);
2414 if (ret == -1)
2415 return get_errno(ret);
2416
2417 nsems = semid_ds.sem_nsems;
2418
2419 array = lock_user(VERIFY_WRITE, target_addr,
2420 nsems*sizeof(unsigned short), 0);
2421 if (!array)
2422 return -TARGET_EFAULT;
2423
2424 for(i=0; i<nsems; i++) {
2425 __put_user((*host_array)[i], &array[i]);
2426 }
2427 free(*host_array);
2428 unlock_user(array, target_addr, 1);
2429
2430 return 0;
2431 }
2432
2433 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2434 union target_semun target_su)
2435 {
2436 union semun arg;
2437 struct semid_ds dsarg;
2438 unsigned short *array = NULL;
2439 struct seminfo seminfo;
2440 abi_long ret = -TARGET_EINVAL;
2441 abi_long err;
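    /* The target may OR a version flag such as IPC_64 into cmd; only the
     * low byte selects the operation handled below.
     */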
2442 cmd &= 0xff;
2443
2444 switch( cmd ) {
2445 case GETVAL:
2446 case SETVAL:
2447 arg.val = tswapl(target_su.val);
2448 ret = get_errno(semctl(semid, semnum, cmd, arg));
2449 target_su.val = tswapl(arg.val);
2450 break;
2451 case GETALL:
2452 case SETALL:
2453 err = target_to_host_semarray(semid, &array, target_su.array);
2454 if (err)
2455 return err;
2456 arg.array = array;
2457 ret = get_errno(semctl(semid, semnum, cmd, arg));
2458 err = host_to_target_semarray(semid, target_su.array, &array);
2459 if (err)
2460 return err;
2461 break;
2462 case IPC_STAT:
2463 case IPC_SET:
2464 case SEM_STAT:
2465 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2466 if (err)
2467 return err;
2468 arg.buf = &dsarg;
2469 ret = get_errno(semctl(semid, semnum, cmd, arg));
2470 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2471 if (err)
2472 return err;
2473 break;
2474 case IPC_INFO:
2475 case SEM_INFO:
2476 arg.__buf = &seminfo;
2477 ret = get_errno(semctl(semid, semnum, cmd, arg));
2478 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2479 if (err)
2480 return err;
2481 break;
2482 case IPC_RMID:
2483 case GETPID:
2484 case GETNCNT:
2485 case GETZCNT:
2486 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2487 break;
2488 }
2489
2490 return ret;
2491 }
2492
2493 struct target_sembuf {
2494 unsigned short sem_num;
2495 short sem_op;
2496 short sem_flg;
2497 };
2498
2499 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2500 abi_ulong target_addr,
2501 unsigned nsops)
2502 {
2503 struct target_sembuf *target_sembuf;
2504 int i;
2505
2506 target_sembuf = lock_user(VERIFY_READ, target_addr,
2507 nsops*sizeof(struct target_sembuf), 1);
2508 if (!target_sembuf)
2509 return -TARGET_EFAULT;
2510
2511 for(i=0; i<nsops; i++) {
2512 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2513 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2514 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2515 }
2516
2517 unlock_user(target_sembuf, target_addr, 0);
2518
2519 return 0;
2520 }
2521
2522 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2523 {
2524 struct sembuf sops[nsops];
2525
2526 if (target_to_host_sembuf(sops, ptr, nsops))
2527 return -TARGET_EFAULT;
2528
2529     return get_errno(semop(semid, sops, nsops));
2530 }
2531
2532 struct target_msqid_ds
2533 {
2534 struct target_ipc_perm msg_perm;
2535 abi_ulong msg_stime;
2536 #if TARGET_ABI_BITS == 32
2537 abi_ulong __unused1;
2538 #endif
2539 abi_ulong msg_rtime;
2540 #if TARGET_ABI_BITS == 32
2541 abi_ulong __unused2;
2542 #endif
2543 abi_ulong msg_ctime;
2544 #if TARGET_ABI_BITS == 32
2545 abi_ulong __unused3;
2546 #endif
2547 abi_ulong __msg_cbytes;
2548 abi_ulong msg_qnum;
2549 abi_ulong msg_qbytes;
2550 abi_ulong msg_lspid;
2551 abi_ulong msg_lrpid;
2552 abi_ulong __unused4;
2553 abi_ulong __unused5;
2554 };
2555
2556 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2557 abi_ulong target_addr)
2558 {
2559 struct target_msqid_ds *target_md;
2560
2561 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2562 return -TARGET_EFAULT;
2563 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2564 return -TARGET_EFAULT;
2565 host_md->msg_stime = tswapl(target_md->msg_stime);
2566 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2567 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2568 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2569 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2570 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2571 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2572 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2573 unlock_user_struct(target_md, target_addr, 0);
2574 return 0;
2575 }
2576
2577 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2578 struct msqid_ds *host_md)
2579 {
2580 struct target_msqid_ds *target_md;
2581
2582 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2583 return -TARGET_EFAULT;
2584 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2585 return -TARGET_EFAULT;
2586 target_md->msg_stime = tswapl(host_md->msg_stime);
2587 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2588 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2589 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2590 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2591 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2592 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2593 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2594 unlock_user_struct(target_md, target_addr, 1);
2595 return 0;
2596 }
2597
2598 struct target_msginfo {
2599 int msgpool;
2600 int msgmap;
2601 int msgmax;
2602 int msgmnb;
2603 int msgmni;
2604 int msgssz;
2605 int msgtql;
2606 unsigned short int msgseg;
2607 };
2608
2609 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2610 struct msginfo *host_msginfo)
2611 {
2612 struct target_msginfo *target_msginfo;
2613 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2614 return -TARGET_EFAULT;
2615 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2616 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2617 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2618 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2619 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2620 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2621 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2622 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2623 unlock_user_struct(target_msginfo, target_addr, 1);
2624 return 0;
2625 }
2626
2627 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2628 {
2629 struct msqid_ds dsarg;
2630 struct msginfo msginfo;
2631 abi_long ret = -TARGET_EINVAL;
2632
2633 cmd &= 0xff;
2634
2635 switch (cmd) {
2636 case IPC_STAT:
2637 case IPC_SET:
2638 case MSG_STAT:
2639 if (target_to_host_msqid_ds(&dsarg,ptr))
2640 return -TARGET_EFAULT;
2641 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2642 if (host_to_target_msqid_ds(ptr,&dsarg))
2643 return -TARGET_EFAULT;
2644 break;
2645 case IPC_RMID:
2646 ret = get_errno(msgctl(msgid, cmd, NULL));
2647 break;
2648 case IPC_INFO:
2649 case MSG_INFO:
2650 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2651 if (host_to_target_msginfo(ptr, &msginfo))
2652 return -TARGET_EFAULT;
2653 break;
2654 }
2655
2656 return ret;
2657 }
2658
2659 struct target_msgbuf {
2660 abi_long mtype;
2661 char mtext[1];
2662 };
2663
2664 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2665 unsigned int msgsz, int msgflg)
2666 {
2667 struct target_msgbuf *target_mb;
2668 struct msgbuf *host_mb;
2669 abi_long ret = 0;
2670
2671 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2672 return -TARGET_EFAULT;
2673 host_mb = malloc(msgsz+sizeof(long));
2674 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2675 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2676 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2677 free(host_mb);
2678 unlock_user_struct(target_mb, msgp, 0);
2679
2680 return ret;
2681 }
2682
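/* do_msgrcv(): receive into a temporary host msgbuf, copy the mtext bytes
 * back into the target buffer and byte-swap mtype in place.
 */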
2683 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2684 unsigned int msgsz, abi_long msgtyp,
2685 int msgflg)
2686 {
2687 struct target_msgbuf *target_mb;
2688 char *target_mtext;
2689 struct msgbuf *host_mb;
2690 abi_long ret = 0;
2691
2692 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2693 return -TARGET_EFAULT;
2694
2695 host_mb = malloc(msgsz+sizeof(long));
2696 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2697
2698 if (ret > 0) {
2699 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2700 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2701 if (!target_mtext) {
2702 ret = -TARGET_EFAULT;
2703 goto end;
2704 }
2705 memcpy(target_mb->mtext, host_mb->mtext, ret);
2706 unlock_user(target_mtext, target_mtext_addr, ret);
2707 }
2708
2709 target_mb->mtype = tswapl(host_mb->mtype);
2710 free(host_mb);
2711
2712 end:
2713 if (target_mb)
2714 unlock_user_struct(target_mb, msgp, 1);
2715 return ret;
2716 }
2717
2718 struct target_shmid_ds
2719 {
2720 struct target_ipc_perm shm_perm;
2721 abi_ulong shm_segsz;
2722 abi_ulong shm_atime;
2723 #if TARGET_ABI_BITS == 32
2724 abi_ulong __unused1;
2725 #endif
2726 abi_ulong shm_dtime;
2727 #if TARGET_ABI_BITS == 32
2728 abi_ulong __unused2;
2729 #endif
2730 abi_ulong shm_ctime;
2731 #if TARGET_ABI_BITS == 32
2732 abi_ulong __unused3;
2733 #endif
2734 int shm_cpid;
2735 int shm_lpid;
2736 abi_ulong shm_nattch;
2737 unsigned long int __unused4;
2738 unsigned long int __unused5;
2739 };
2740
2741 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2742 abi_ulong target_addr)
2743 {
2744 struct target_shmid_ds *target_sd;
2745
2746 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2747 return -TARGET_EFAULT;
2748 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2749 return -TARGET_EFAULT;
2750 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2751 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2752 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2753 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2754 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2755 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2756 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2757 unlock_user_struct(target_sd, target_addr, 0);
2758 return 0;
2759 }
2760
2761 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2762 struct shmid_ds *host_sd)
2763 {
2764 struct target_shmid_ds *target_sd;
2765
2766 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2767 return -TARGET_EFAULT;
2768 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2769 return -TARGET_EFAULT;
2770 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2771 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2772 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2773 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2774 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2775 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2776 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2777 unlock_user_struct(target_sd, target_addr, 1);
2778 return 0;
2779 }
2780
2781 struct target_shminfo {
2782 abi_ulong shmmax;
2783 abi_ulong shmmin;
2784 abi_ulong shmmni;
2785 abi_ulong shmseg;
2786 abi_ulong shmall;
2787 };
2788
2789 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2790 struct shminfo *host_shminfo)
2791 {
2792 struct target_shminfo *target_shminfo;
2793 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2794 return -TARGET_EFAULT;
2795 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2796 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2797 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2798 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2799 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2800 unlock_user_struct(target_shminfo, target_addr, 1);
2801 return 0;
2802 }
2803
2804 struct target_shm_info {
2805 int used_ids;
2806 abi_ulong shm_tot;
2807 abi_ulong shm_rss;
2808 abi_ulong shm_swp;
2809 abi_ulong swap_attempts;
2810 abi_ulong swap_successes;
2811 };
2812
2813 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2814 struct shm_info *host_shm_info)
2815 {
2816 struct target_shm_info *target_shm_info;
2817 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2818 return -TARGET_EFAULT;
2819 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2820 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2821 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2822 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2823 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2824 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2825 unlock_user_struct(target_shm_info, target_addr, 1);
2826 return 0;
2827 }
2828
2829 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2830 {
2831 struct shmid_ds dsarg;
2832 struct shminfo shminfo;
2833 struct shm_info shm_info;
2834 abi_long ret = -TARGET_EINVAL;
2835
2836 cmd &= 0xff;
2837
2838 switch(cmd) {
2839 case IPC_STAT:
2840 case IPC_SET:
2841 case SHM_STAT:
2842 if (target_to_host_shmid_ds(&dsarg, buf))
2843 return -TARGET_EFAULT;
2844 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2845 if (host_to_target_shmid_ds(buf, &dsarg))
2846 return -TARGET_EFAULT;
2847 break;
2848 case IPC_INFO:
2849 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2850 if (host_to_target_shminfo(buf, &shminfo))
2851 return -TARGET_EFAULT;
2852 break;
2853 case SHM_INFO:
2854 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2855 if (host_to_target_shm_info(buf, &shm_info))
2856 return -TARGET_EFAULT;
2857 break;
2858 case IPC_RMID:
2859 case SHM_LOCK:
2860 case SHM_UNLOCK:
2861 ret = get_errno(shmctl(shmid, cmd, NULL));
2862 break;
2863 }
2864
2865 return ret;
2866 }
2867
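/* do_shmat(): attach the segment on the host and mirror it into the guest
 * address space. If the guest passed no address, pick a free range with
 * mmap_find_vma(); then mark the pages valid and readable (plus writable
 * unless SHM_RDONLY) and record the region for do_shmdt().
 */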
2868 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2869 {
2870 abi_long raddr;
2871 void *host_raddr;
2872 struct shmid_ds shm_info;
2873 int i,ret;
2874
2875 /* find out the length of the shared memory segment */
2876 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2877 if (is_error(ret)) {
2878 /* can't get length, bail out */
2879 return ret;
2880 }
2881
2882 mmap_lock();
2883
2884 if (shmaddr)
2885 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2886 else {
2887 abi_ulong mmap_start;
2888
2889 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2890
2891 if (mmap_start == -1) {
2892 errno = ENOMEM;
2893 host_raddr = (void *)-1;
2894 } else
2895 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2896 }
2897
2898 if (host_raddr == (void *)-1) {
2899 mmap_unlock();
2900 return get_errno((long)host_raddr);
2901 }
2902 raddr=h2g((unsigned long)host_raddr);
2903
2904 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2905 PAGE_VALID | PAGE_READ |
2906 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2907
2908 for (i = 0; i < N_SHM_REGIONS; i++) {
2909 if (shm_regions[i].start == 0) {
2910 shm_regions[i].start = raddr;
2911 shm_regions[i].size = shm_info.shm_segsz;
2912 break;
2913 }
2914 }
2915
2916 mmap_unlock();
2917 return raddr;
2918
2919 }
2920
2921 static inline abi_long do_shmdt(abi_ulong shmaddr)
2922 {
2923 int i;
2924
2925 for (i = 0; i < N_SHM_REGIONS; ++i) {
2926 if (shm_regions[i].start == shmaddr) {
2927 shm_regions[i].start = 0;
2928 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
2929 break;
2930 }
2931 }
2932
2933 return get_errno(shmdt(g2h(shmaddr)));
2934 }
2935
2936 #ifdef TARGET_NR_ipc
2937 /* ??? This only works with linear mappings. */
2938 /* do_ipc() must return target values and target errnos. */
2939 static abi_long do_ipc(unsigned int call, int first,
2940 int second, int third,
2941 abi_long ptr, abi_long fifth)
2942 {
2943 int version;
2944 abi_long ret = 0;
2945
2946 version = call >> 16;
2947 call &= 0xffff;
2948
2949 switch (call) {
2950 case IPCOP_semop:
2951 ret = do_semop(first, ptr, second);
2952 break;
2953
2954 case IPCOP_semget:
2955 ret = get_errno(semget(first, second, third));
2956 break;
2957
2958 case IPCOP_semctl:
2959 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2960 break;
2961
2962 case IPCOP_msgget:
2963 ret = get_errno(msgget(first, second));
2964 break;
2965
2966 case IPCOP_msgsnd:
2967 ret = do_msgsnd(first, ptr, second, third);
2968 break;
2969
2970 case IPCOP_msgctl:
2971 ret = do_msgctl(first, second, ptr);
2972 break;
2973
2974 case IPCOP_msgrcv:
2975 switch (version) {
2976 case 0:
2977 {
2978 struct target_ipc_kludge {
2979 abi_long msgp;
2980 abi_long msgtyp;
2981 } *tmp;
2982
2983 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2984 ret = -TARGET_EFAULT;
2985 break;
2986 }
2987
2988 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2989
2990 unlock_user_struct(tmp, ptr, 0);
2991 break;
2992 }
2993 default:
2994 ret = do_msgrcv(first, ptr, second, fifth, third);
2995 }
2996 break;
2997
2998 case IPCOP_shmat:
2999 switch (version) {
3000 default:
3001 {
3002 abi_ulong raddr;
3003 raddr = do_shmat(first, ptr, second);
3004 if (is_error(raddr))
3005 return get_errno(raddr);
3006 if (put_user_ual(raddr, third))
3007 return -TARGET_EFAULT;
3008 break;
3009 }
3010 case 1:
3011 ret = -TARGET_EINVAL;
3012 break;
3013 }
3014 break;
3015 case IPCOP_shmdt:
3016 ret = do_shmdt(ptr);
3017 break;
3018
3019 case IPCOP_shmget:
3020 /* IPC_* flag values are the same on all linux platforms */
3021 ret = get_errno(shmget(first, second, third));
3022 break;
3023
3024 /* IPC_* and SHM_* command values are the same on all linux platforms */
3025 case IPCOP_shmctl:
3026 ret = do_shmctl(first, second, third);
3027 break;
3028 default:
3029 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3030 ret = -TARGET_ENOSYS;
3031 break;
3032 }
3033 return ret;
3034 }
3035 #endif
3036
3037 /* kernel structure types definitions */
3038
3039 #define STRUCT(name, ...) STRUCT_ ## name,
3040 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3041 enum {
3042 #include "syscall_types.h"
3043 };
3044 #undef STRUCT
3045 #undef STRUCT_SPECIAL
3046
3047 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3048 #define STRUCT_SPECIAL(name)
3049 #include "syscall_types.h"
3050 #undef STRUCT
3051 #undef STRUCT_SPECIAL
3052
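/* Each IOCTLEntry maps a target ioctl number to its host counterpart plus an
 * argtype description. The generic do_ioctl() path thunks the argument
 * between guest and host layouts from that description; entries that need
 * custom handling provide a do_ioctl callback such as
 * do_ioctl_fs_ioc_fiemap() or do_ioctl_ifconf() below.
 */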
3053 typedef struct IOCTLEntry IOCTLEntry;
3054
3055 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3056 int fd, abi_long cmd, abi_long arg);
3057
3058 struct IOCTLEntry {
3059 unsigned int target_cmd;
3060 unsigned int host_cmd;
3061 const char *name;
3062 int access;
3063 do_ioctl_fn *do_ioctl;
3064 const argtype arg_type[5];
3065 };
3066
3067 #define IOC_R 0x0001
3068 #define IOC_W 0x0002
3069 #define IOC_RW (IOC_R | IOC_W)
3070
3071 #define MAX_STRUCT_SIZE 4096
3072
3073 #ifdef CONFIG_FIEMAP
3074 /* So fiemap access checks don't overflow on 32 bit systems.
3075 * This is very slightly smaller than the limit imposed by
3076 * the underlying kernel.
3077 */
3078 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3079 / sizeof(struct fiemap_extent))
3080
3081 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3082 int fd, abi_long cmd, abi_long arg)
3083 {
3084 /* The parameter for this ioctl is a struct fiemap followed
3085 * by an array of struct fiemap_extent whose size is set
3086 * in fiemap->fm_extent_count. The array is filled in by the
3087 * ioctl.
3088 */
3089 int target_size_in, target_size_out;
3090 struct fiemap *fm;
3091 const argtype *arg_type = ie->arg_type;
3092 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3093 void *argptr, *p;
3094 abi_long ret;
3095 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3096 uint32_t outbufsz;
3097 int free_fm = 0;
3098
3099 assert(arg_type[0] == TYPE_PTR);
3100 assert(ie->access == IOC_RW);
3101 arg_type++;
3102 target_size_in = thunk_type_size(arg_type, 0);
3103 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3104 if (!argptr) {
3105 return -TARGET_EFAULT;
3106 }
3107 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3108 unlock_user(argptr, arg, 0);
3109 fm = (struct fiemap *)buf_temp;
3110 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3111 return -TARGET_EINVAL;
3112 }
3113
3114 outbufsz = sizeof (*fm) +
3115 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3116
3117 if (outbufsz > MAX_STRUCT_SIZE) {
3118 /* We can't fit all the extents into the fixed size buffer.
3119 * Allocate one that is large enough and use it instead.
3120 */
3121 fm = malloc(outbufsz);
3122 if (!fm) {
3123 return -TARGET_ENOMEM;
3124 }
3125 memcpy(fm, buf_temp, sizeof(struct fiemap));
3126 free_fm = 1;
3127 }
3128 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3129 if (!is_error(ret)) {
3130 target_size_out = target_size_in;
3131 /* An extent_count of 0 means we were only counting the extents
3132 * so there are no structs to copy
3133 */
3134 if (fm->fm_extent_count != 0) {
3135 target_size_out += fm->fm_mapped_extents * extent_size;
3136 }
3137 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3138 if (!argptr) {
3139 ret = -TARGET_EFAULT;
3140 } else {
3141 /* Convert the struct fiemap */
3142 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3143 if (fm->fm_extent_count != 0) {
3144 p = argptr + target_size_in;
3145 /* ...and then all the struct fiemap_extents */
3146 for (i = 0; i < fm->fm_mapped_extents; i++) {
3147 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3148 THUNK_TARGET);
3149 p += extent_size;
3150 }
3151 }
3152 unlock_user(argptr, arg, target_size_out);
3153 }
3154 }
3155 if (free_fm) {
3156 free(fm);
3157 }
3158 return ret;
3159 }
3160 #endif
3161
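/* Handler for the ifconf-style ioctl (SIOCGIFCONF): the guest ifc_buf points
 * at an array of target struct ifreq, so we run the ioctl against a
 * host-sized buffer and then convert the returned length and each ifreq
 * back to the target layout.
 */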
3162 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3163 int fd, abi_long cmd, abi_long arg)
3164 {
3165 const argtype *arg_type = ie->arg_type;
3166 int target_size;
3167 void *argptr;
3168 int ret;
3169 struct ifconf *host_ifconf;
3170 uint32_t outbufsz;
3171 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3172 int target_ifreq_size;
3173 int nb_ifreq;
3174 int free_buf = 0;
3175 int i;
3176 int target_ifc_len;
3177 abi_long target_ifc_buf;
3178 int host_ifc_len;
3179 char *host_ifc_buf;
3180
3181 assert(arg_type[0] == TYPE_PTR);
3182 assert(ie->access == IOC_RW);
3183
3184 arg_type++;
3185 target_size = thunk_type_size(arg_type, 0);
3186
3187 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3188 if (!argptr)
3189 return -TARGET_EFAULT;
3190 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3191 unlock_user(argptr, arg, 0);
3192
3193 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3194 target_ifc_len = host_ifconf->ifc_len;
3195 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3196
3197 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3198 nb_ifreq = target_ifc_len / target_ifreq_size;
3199 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3200
3201 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3202 if (outbufsz > MAX_STRUCT_SIZE) {
3203         /* We can't fit all the requested ifreq entries into the fixed size buffer.
3204 * Allocate one that is large enough and use it instead.
3205 */
3206 host_ifconf = malloc(outbufsz);
3207 if (!host_ifconf) {
3208 return -TARGET_ENOMEM;
3209 }
3210 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3211 free_buf = 1;
3212 }
3213 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3214
3215 host_ifconf->ifc_len = host_ifc_len;
3216 host_ifconf->ifc_buf = host_ifc_buf;
3217
3218 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3219 if (!is_error(ret)) {
3220 /* convert host ifc_len to target ifc_len */
3221
3222 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3223 target_ifc_len = nb_ifreq * target_ifreq_size;
3224 host_ifconf->ifc_len = target_ifc_len;
3225
3226 /* restore target ifc_buf */
3227
3228 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3229
3230 /* copy struct ifconf to target user */
3231
3232 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3233 if (!argptr)
3234 return -TARGET_EFAULT;
3235 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3236 unlock_user(argptr, arg, target_size);
3237
3238 /* copy ifreq[] to target user */
3239
3240 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3241 for (i = 0; i < nb_ifreq ; i++) {
3242 thunk_convert(argptr + i * target_ifreq_size,
3243 host_ifc_buf + i * sizeof(struct ifreq),
3244 ifreq_arg_type, THUNK_TARGET);
3245 }
3246 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3247 }
3248
3249 if (free_buf) {
3250 free(host_ifconf);
3251 }
3252
3253 return ret;
3254 }
3255
3256 static IOCTLEntry ioctl_entries[] = {
3257 #define IOCTL(cmd, access, ...) \
3258 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3259 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3260 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3261 #include "ioctls.h"
3262 { 0, 0, },
3263 };
3264
3265 /* ??? Implement proper locking for ioctls. */
3266 /* do_ioctl() Must return target values and target errnos. */
3267 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3268 {
3269 const IOCTLEntry *ie;
3270 const argtype *arg_type;
3271 abi_long ret;
3272 uint8_t buf_temp[MAX_STRUCT_SIZE];
3273 int target_size;
3274 void *argptr;
3275
3276 ie = ioctl_entries;
3277 for(;;) {
3278 if (ie->target_cmd == 0) {
3279 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3280 return -TARGET_ENOSYS;
3281 }
3282 if (ie->target_cmd == cmd)
3283 break;
3284 ie++;
3285 }
3286 arg_type = ie->arg_type;
3287 #if defined(DEBUG)
3288 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3289 #endif
3290 if (ie->do_ioctl) {
3291 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3292 }
3293
3294 switch(arg_type[0]) {
3295 case TYPE_NULL:
3296 /* no argument */
3297 ret = get_errno(ioctl(fd, ie->host_cmd));
3298 break;
3299 case TYPE_PTRVOID:
3300 case TYPE_INT:
3301         /* int argument */
3302 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3303 break;
3304 case TYPE_PTR:
3305 arg_type++;
3306 target_size = thunk_type_size(arg_type, 0);
3307 switch(ie->access) {
3308 case IOC_R:
3309 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3310 if (!is_error(ret)) {
3311 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3312 if (!argptr)
3313 return -TARGET_EFAULT;
3314 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3315 unlock_user(argptr, arg, target_size);
3316 }
3317 break;
3318 case IOC_W:
3319 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3320 if (!argptr)
3321 return -TARGET_EFAULT;
3322 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3323 unlock_user(argptr, arg, 0);
3324 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3325 break;
3326 default:
3327 case IOC_RW:
3328 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3329 if (!argptr)
3330 return -TARGET_EFAULT;
3331 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3332 unlock_user(argptr, arg, 0);
3333 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3334 if (!is_error(ret)) {
3335 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3336 if (!argptr)
3337 return -TARGET_EFAULT;
3338 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3339 unlock_user(argptr, arg, target_size);
3340 }
3341 break;
3342 }
3343 break;
3344 default:
3345 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3346 (long)cmd, arg_type[0]);
3347 ret = -TARGET_ENOSYS;
3348 break;
3349 }
3350 return ret;
3351 }
3352
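/* Termios flag translation tables: each row gives a target mask and value
 * together with the corresponding host mask and value, and is consumed by
 * the bitmask conversion helpers used in target_to_host_termios() and
 * host_to_target_termios() below.
 */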
3353 static const bitmask_transtbl iflag_tbl[] = {
3354 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3355 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3356 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3357 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3358 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3359 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3360 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3361 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3362 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3363 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3364 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3365 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3366 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3367 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3368 { 0, 0, 0, 0 }
3369 };
3370
3371 static const bitmask_transtbl oflag_tbl[] = {
3372 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3373 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3374 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3375 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3376 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3377 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3378 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3379 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3380 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3381 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3382 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3383 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3384 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3385 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3386 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3387 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3388 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3389 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3390 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3391 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3392 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3393 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3394 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3395 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3396 { 0, 0, 0, 0 }
3397 };
3398
3399 static const bitmask_transtbl cflag_tbl[] = {
3400 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3401 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3402 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3403 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3404 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3405 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3406 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3407 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3408 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3409 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3410 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3411 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3412 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3413 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3414 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3415 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3416 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3417 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3418 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3419 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3420 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3421 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3422 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3423 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3424 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3425 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3426 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3427 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3428 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3429 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3430 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3431 { 0, 0, 0, 0 }
3432 };
3433
3434 static const bitmask_transtbl lflag_tbl[] = {
3435 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3436 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3437 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3438 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3439 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3440 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3441 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3442 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3443 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3444 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3445 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3446 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3447 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3448 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3449 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3450 { 0, 0, 0, 0 }
3451 };
3452
3453 static void target_to_host_termios (void *dst, const void *src)
3454 {
3455 struct host_termios *host = dst;
3456 const struct target_termios *target = src;
3457
3458 host->c_iflag =
3459 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3460 host->c_oflag =
3461 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3462 host->c_cflag =
3463 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3464 host->c_lflag =
3465 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3466 host->c_line = target->c_line;
3467
3468 memset(host->c_cc, 0, sizeof(host->c_cc));
3469 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3470 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3471 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3472 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3473 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3474 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3475 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3476 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3477 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3478 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3479 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3480 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3481 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3482 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3483 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3484 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3485 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3486 }
3487
3488 static void host_to_target_termios (void *dst, const void *src)
3489 {
3490 struct target_termios *target = dst;
3491 const struct host_termios *host = src;
3492
3493 target->c_iflag =
3494 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3495 target->c_oflag =
3496 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3497 target->c_cflag =
3498 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3499 target->c_lflag =
3500 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3501 target->c_line = host->c_line;
3502
3503 memset(target->c_cc, 0, sizeof(target->c_cc));
3504 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3505 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3506 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3507 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3508 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3509 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3510 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3511 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3512 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3513 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3514 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3515 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3516 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3517 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3518 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3519 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3520 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3521 }
3522
3523 static const StructEntry struct_termios_def = {
3524 .convert = { host_to_target_termios, target_to_host_termios },
3525 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3526 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3527 };
3528
3529 static bitmask_transtbl mmap_flags_tbl[] = {
3530 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3531 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3532 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3533 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3534 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3535 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3536 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3537 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3538 { 0, 0, 0, 0 }
3539 };
3540
3541 #if defined(TARGET_I386)
3542
3543 /* NOTE: there is really only one LDT, shared by all the threads */
3544 static uint8_t *ldt_table;
3545
3546 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3547 {
3548 int size;
3549 void *p;
3550
3551 if (!ldt_table)
3552 return 0;
3553 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3554 if (size > bytecount)
3555 size = bytecount;
3556 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3557 if (!p)
3558 return -TARGET_EFAULT;
3559     /* ??? Should this be byteswapped? */
3560 memcpy(p, ldt_table, size);
3561 unlock_user(p, ptr, size);
3562 return size;
3563 }
3564
3565 /* XXX: add locking support */
3566 static abi_long write_ldt(CPUX86State *env,
3567 abi_ulong ptr, unsigned long bytecount, int oldmode)
3568 {
3569 struct target_modify_ldt_ldt_s ldt_info;
3570 struct target_modify_ldt_ldt_s *target_ldt_info;
3571 int seg_32bit, contents, read_exec_only, limit_in_pages;
3572 int seg_not_present, useable, lm;
3573 uint32_t *lp, entry_1, entry_2;
3574
3575 if (bytecount != sizeof(ldt_info))
3576 return -TARGET_EINVAL;
3577 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3578 return -TARGET_EFAULT;
3579 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3580 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3581 ldt_info.limit = tswap32(target_ldt_info->limit);
3582 ldt_info.flags = tswap32(target_ldt_info->flags);
3583 unlock_user_struct(target_ldt_info, ptr, 0);
3584
3585 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3586 return -TARGET_EINVAL;
3587 seg_32bit = ldt_info.flags & 1;
3588 contents = (ldt_info.flags >> 1) & 3;
3589 read_exec_only = (ldt_info.flags >> 3) & 1;
3590 limit_in_pages = (ldt_info.flags >> 4) & 1;
3591 seg_not_present = (ldt_info.flags >> 5) & 1;
3592 useable = (ldt_info.flags >> 6) & 1;
3593 #ifdef TARGET_ABI32
3594 lm = 0;
3595 #else
3596 lm = (ldt_info.flags >> 7) & 1;
3597 #endif
3598 if (contents == 3) {
3599 if (oldmode)
3600 return -TARGET_EINVAL;
3601 if (seg_not_present == 0)
3602 return -TARGET_EINVAL;
3603 }
3604 /* allocate the LDT */
3605 if (!ldt_table) {
3606 env->ldt.base = target_mmap(0,
3607 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3608 PROT_READ|PROT_WRITE,
3609 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3610 if (env->ldt.base == -1)
3611 return -TARGET_ENOMEM;
3612 memset(g2h(env->ldt.base), 0,
3613 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3614 env->ldt.limit = 0xffff;
3615 ldt_table = g2h(env->ldt.base);
3616 }
3617
3618 /* NOTE: same code as Linux kernel */
3619 /* Allow LDTs to be cleared by the user. */
3620 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3621 if (oldmode ||
3622 (contents == 0 &&
3623 read_exec_only == 1 &&
3624 seg_32bit == 0 &&
3625 limit_in_pages == 0 &&
3626 seg_not_present == 1 &&
3627 useable == 0 )) {
3628 entry_1 = 0;
3629 entry_2 = 0;
3630 goto install;
3631 }
3632 }
3633
3634 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3635 (ldt_info.limit & 0x0ffff);
3636 entry_2 = (ldt_info.base_addr & 0xff000000) |
3637 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3638 (ldt_info.limit & 0xf0000) |
3639 ((read_exec_only ^ 1) << 9) |
3640 (contents << 10) |
3641 ((seg_not_present ^ 1) << 15) |
3642 (seg_32bit << 22) |
3643 (limit_in_pages << 23) |
3644 (lm << 21) |
3645 0x7000;
3646 if (!oldmode)
3647 entry_2 |= (useable << 20);
3648
3649 /* Install the new entry ... */
3650 install:
3651 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3652 lp[0] = tswap32(entry_1);
3653 lp[1] = tswap32(entry_2);
3654 return 0;
3655 }
3656
3657 /* specific and weird i386 syscalls */
3658 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3659 unsigned long bytecount)
3660 {
3661 abi_long ret;
3662
3663 switch (func) {
3664 case 0:
3665 ret = read_ldt(ptr, bytecount);
3666 break;
3667 case 1:
3668 ret = write_ldt(env, ptr, bytecount, 1);
3669 break;
3670 case 0x11:
3671 ret = write_ldt(env, ptr, bytecount, 0);
3672 break;
3673 default:
3674 ret = -TARGET_ENOSYS;
3675 break;
3676 }
3677 return ret;
3678 }
3679
3680 #if defined(TARGET_I386) && defined(TARGET_ABI32)
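/* set_thread_area: install a TLS descriptor in the GDT. If the guest passes
 * entry_number == -1, pick the first free slot in the
 * TARGET_GDT_ENTRY_TLS_MIN..TARGET_GDT_ENTRY_TLS_MAX range and report it
 * back; the descriptor words are built the same way as in write_ldt().
 */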
3681 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3682 {
3683 uint64_t *gdt_table = g2h(env->gdt.base);
3684 struct target_modify_ldt_ldt_s ldt_info;
3685 struct target_modify_ldt_ldt_s *target_ldt_info;
3686 int seg_32bit, contents, read_exec_only, limit_in_pages;
3687 int seg_not_present, useable, lm;
3688 uint32_t *lp, entry_1, entry_2;
3689 int i;
3690
3691 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3692 if (!target_ldt_info)
3693 return -TARGET_EFAULT;
3694 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3695 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3696 ldt_info.limit = tswap32(target_ldt_info->limit);
3697 ldt_info.flags = tswap32(target_ldt_info->flags);
3698 if (ldt_info.entry_number == -1) {
3699 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3700 if (gdt_table[i] == 0) {
3701 ldt_info.entry_number = i;
3702 target_ldt_info->entry_number = tswap32(i);
3703 break;
3704 }
3705 }
3706 }
3707 unlock_user_struct(target_ldt_info, ptr, 1);
3708
3709 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3710 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3711 return -TARGET_EINVAL;
3712 seg_32bit = ldt_info.flags & 1;
3713 contents = (ldt_info.flags >> 1) & 3;
3714 read_exec_only = (ldt_info.flags >> 3) & 1;
3715 limit_in_pages = (ldt_info.flags >> 4) & 1;
3716 seg_not_present = (ldt_info.flags >> 5) & 1;
3717 useable = (ldt_info.flags >> 6) & 1;
3718 #ifdef TARGET_ABI32
3719 lm = 0;
3720 #else
3721 lm = (ldt_info.flags >> 7) & 1;
3722 #endif
3723
3724 if (contents == 3) {
3725 if (seg_not_present == 0)
3726 return -TARGET_EINVAL;
3727 }
3728
3729 /* NOTE: same code as Linux kernel */
3730 /* Allow LDTs to be cleared by the user. */
3731 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3732 if ((contents == 0 &&
3733 read_exec_only == 1 &&
3734 seg_32bit == 0 &&
3735 limit_in_pages == 0 &&
3736 seg_not_present == 1 &&
3737 useable == 0 )) {
3738 entry_1 = 0;
3739 entry_2 = 0;
3740 goto install;
3741 }
3742 }
3743
3744 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3745 (ldt_info.limit & 0x0ffff);
3746 entry_2 = (ldt_info.base_addr & 0xff000000) |
3747 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3748 (ldt_info.limit & 0xf0000) |
3749 ((read_exec_only ^ 1) << 9) |
3750 (contents << 10) |
3751 ((seg_not_present ^ 1) << 15) |
3752 (seg_32bit << 22) |
3753 (limit_in_pages << 23) |
3754 (useable << 20) |
3755 (lm << 21) |
3756 0x7000;
3757
3758 /* Install the new entry ... */
3759 install:
3760 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3761 lp[0] = tswap32(entry_1);
3762 lp[1] = tswap32(entry_2);
3763 return 0;
3764 }
3765
3766 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3767 {
3768 struct target_modify_ldt_ldt_s *target_ldt_info;
3769 uint64_t *gdt_table = g2h(env->gdt.base);
3770 uint32_t base_addr, limit, flags;
3771 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3772 int seg_not_present, useable, lm;
3773 uint32_t *lp, entry_1, entry_2;
3774
3775 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3776 if (!target_ldt_info)
3777 return -TARGET_EFAULT;
3778 idx = tswap32(target_ldt_info->entry_number);
3779 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3780 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3781 unlock_user_struct(target_ldt_info, ptr, 1);
3782 return -TARGET_EINVAL;
3783 }
3784 lp = (uint32_t *)(gdt_table + idx);
3785 entry_1 = tswap32(lp[0]);
3786 entry_2 = tswap32(lp[1]);
3787
3788 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3789 contents = (entry_2 >> 10) & 3;
3790 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3791 seg_32bit = (entry_2 >> 22) & 1;
3792 limit_in_pages = (entry_2 >> 23) & 1;
3793 useable = (entry_2 >> 20) & 1;
3794 #ifdef TARGET_ABI32
3795 lm = 0;
3796 #else
3797 lm = (entry_2 >> 21) & 1;
3798 #endif
3799 flags = (seg_32bit << 0) | (contents << 1) |
3800 (read_exec_only << 3) | (limit_in_pages << 4) |
3801 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3802 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3803 base_addr = (entry_1 >> 16) |
3804 (entry_2 & 0xff000000) |
3805 ((entry_2 & 0xff) << 16);
3806 target_ldt_info->base_addr = tswapl(base_addr);
3807 target_ldt_info->limit = tswap32(limit);
3808 target_ldt_info->flags = tswap32(flags);
3809 unlock_user_struct(target_ldt_info, ptr, 1);
3810 return 0;
3811 }
3812 #endif /* TARGET_I386 && TARGET_ABI32 */
3813
3814 #ifndef TARGET_ABI32
3815 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3816 {
3817 abi_long ret = 0;
3818 abi_ulong val;
3819 int idx;
3820
3821 switch(code) {
3822 case TARGET_ARCH_SET_GS:
3823 case TARGET_ARCH_SET_FS:
3824 if (code == TARGET_ARCH_SET_GS)
3825 idx = R_GS;
3826 else
3827 idx = R_FS;
3828 cpu_x86_load_seg(env, idx, 0);
3829 env->segs[idx].base = addr;
3830 break;
3831 case TARGET_ARCH_GET_GS:
3832 case TARGET_ARCH_GET_FS:
3833 if (code == TARGET_ARCH_GET_GS)
3834 idx = R_GS;
3835 else
3836 idx = R_FS;
3837 val = env->segs[idx].base;
3838 if (put_user(val, addr, abi_ulong))
3839 ret = -TARGET_EFAULT;
3840 break;
3841 default:
3842 ret = -TARGET_EINVAL;
3843 break;
3844 }
3845 return ret;
3846 }
3847 #endif
3848
3849 #endif /* defined(TARGET_I386) */
3850
3851 #define NEW_STACK_SIZE 0x40000
3852
3853 #if defined(CONFIG_USE_NPTL)
3854
3855 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3856 typedef struct {
3857 CPUState *env;
3858 pthread_mutex_t mutex;
3859 pthread_cond_t cond;
3860 pthread_t thread;
3861 uint32_t tid;
3862 abi_ulong child_tidptr;
3863 abi_ulong parent_tidptr;
3864 sigset_t sigmask;
3865 } new_thread_info;
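/* Handshake block shared between do_fork() and clone_func(): the parent
   fills in env and the tid pointers, and the child writes back its tid and
   signals cond once it is ready to run. */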
3866
3867 static void *clone_func(void *arg)
3868 {
3869 new_thread_info *info = arg;
3870 CPUState *env;
3871 TaskState *ts;
3872
3873 env = info->env;
3874 thread_env = env;
3875 ts = (TaskState *)thread_env->opaque;
3876 info->tid = gettid();
3877 env->host_tid = info->tid;
3878 task_settid(ts);
3879 if (info->child_tidptr)
3880 put_user_u32(info->tid, info->child_tidptr);
3881 if (info->parent_tidptr)
3882 put_user_u32(info->tid, info->parent_tidptr);
3883 /* Enable signals. */
3884 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3885 /* Signal to the parent that we're ready. */
3886 pthread_mutex_lock(&info->mutex);
3887 pthread_cond_broadcast(&info->cond);
3888 pthread_mutex_unlock(&info->mutex);
3889 /* Wait until the parent has finished initializing the TLS state. */
3890 pthread_mutex_lock(&clone_lock);
3891 pthread_mutex_unlock(&clone_lock);
3892 cpu_loop(env);
3893 /* never exits */
3894 return NULL;
3895 }
3896 #else
3897
3898 static int clone_func(void *arg)
3899 {
3900 CPUState *env = arg;
3901 cpu_loop(env);
3902 /* never exits */
3903 return 0;
3904 }
3905 #endif
3906
3907 /* do_fork() must return host values and target errnos (unlike most
3908 do_*() functions). */
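/* (For CLONE_VM the new guest thread is backed by a host pthread: the parent
   blocks all signals, spawns a detached thread running clone_func(), and
   waits on info.cond until the child has published its TID.) */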
3909 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3910 abi_ulong parent_tidptr, target_ulong newtls,
3911 abi_ulong child_tidptr)
3912 {
3913 int ret;
3914 TaskState *ts;
3915 CPUState *new_env;
3916 #if defined(CONFIG_USE_NPTL)
3917 unsigned int nptl_flags;
3918 sigset_t sigmask;
3919 #else
3920 uint8_t *new_stack;
3921 #endif
3922
3923 /* Emulate vfork() with fork() */
3924 if (flags & CLONE_VFORK)
3925 flags &= ~(CLONE_VFORK | CLONE_VM);
3926
3927 if (flags & CLONE_VM) {
3928 TaskState *parent_ts = (TaskState *)env->opaque;
3929 #if defined(CONFIG_USE_NPTL)
3930 new_thread_info info;
3931 pthread_attr_t attr;
3932 #endif
3933 ts = qemu_mallocz(sizeof(TaskState));
3934 init_task_state(ts);
3935 /* we create a new CPU instance. */
3936 new_env = cpu_copy(env);
3937 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3938 cpu_reset(new_env);
3939 #endif
3940 /* Init regs that differ from the parent. */
3941 cpu_clone_regs(new_env, newsp);
3942 new_env->opaque = ts;
3943 ts->bprm = parent_ts->bprm;
3944 ts->info = parent_ts->info;
3945 #if defined(CONFIG_USE_NPTL)
3946 nptl_flags = flags;
3947 flags &= ~CLONE_NPTL_FLAGS2;
3948
3949 if (nptl_flags & CLONE_CHILD_CLEARTID) {
3950 ts->child_tidptr = child_tidptr;
3951 }
3952
3953 if (nptl_flags & CLONE_SETTLS)
3954 cpu_set_tls (new_env, newtls);
3955
3956 /* Grab a mutex so that thread setup appears atomic. */
3957 pthread_mutex_lock(&clone_lock);
3958
3959 memset(&info, 0, sizeof(info));
3960 pthread_mutex_init(&info.mutex, NULL);
3961 pthread_mutex_lock(&info.mutex);
3962 pthread_cond_init(&info.cond, NULL);
3963 info.env = new_env;
3964 if (nptl_flags & CLONE_CHILD_SETTID)
3965 info.child_tidptr = child_tidptr;
3966 if (nptl_flags & CLONE_PARENT_SETTID)
3967 info.parent_tidptr = parent_tidptr;
3968
3969 ret = pthread_attr_init(&attr);
3970 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
3971 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
3972 /* It is not safe to deliver signals until the child has finished
3973 initializing, so temporarily block all signals. */
3974 sigfillset(&sigmask);
3975 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3976
3977 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3978 /* TODO: Free new CPU state if thread creation failed. */
3979
3980 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3981 pthread_attr_destroy(&attr);
3982 if (ret == 0) {
3983 /* Wait for the child to initialize. */
3984 pthread_cond_wait(&info.cond, &info.mutex);
3985 ret = info.tid;
3986 if (flags & CLONE_PARENT_SETTID)
3987 put_user_u32(ret, parent_tidptr);
3988 } else {
3989 ret = -1;
3990 }
3991 pthread_mutex_unlock(&info.mutex);
3992 pthread_cond_destroy(&info.cond);
3993 pthread_mutex_destroy(&info.mutex);
3994 pthread_mutex_unlock(&clone_lock);
3995 #else
3996 if (flags & CLONE_NPTL_FLAGS2)
3997 return -EINVAL;
3998 /* This is probably going to die very quickly, but do it anyway. */
3999 new_stack = qemu_mallocz (NEW_STACK_SIZE);
4000 #ifdef __ia64__
4001 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4002 #else
4003 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4004 #endif
4005 #endif
4006 } else {
4007 /* if no CLONE_VM, we consider it a fork */
4008 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4009 return -EINVAL;
4010 fork_start();
4011 ret = fork();
4012 if (ret == 0) {
4013 /* Child Process. */
4014 cpu_clone_regs(env, newsp);
4015 fork_end(1);
4016 #if defined(CONFIG_USE_NPTL)
4017 /* There is a race condition here. The parent process could
4018 theoretically read the TID in the child process before the child
4019 tid is set. This would require using either ptrace
4020 (not implemented) or having *_tidptr point at a shared memory
4021 mapping. We can't repeat the spinlock hack used above because
4022 the child process gets its own copy of the lock. */
4023 if (flags & CLONE_CHILD_SETTID)
4024 put_user_u32(gettid(), child_tidptr);
4025 if (flags & CLONE_PARENT_SETTID)
4026 put_user_u32(gettid(), parent_tidptr);
4027 ts = (TaskState *)env->opaque;
4028 if (flags & CLONE_SETTLS)
4029 cpu_set_tls (env, newtls);
4030 if (flags & CLONE_CHILD_CLEARTID)
4031 ts->child_tidptr = child_tidptr;
4032 #endif
4033 } else {
4034 fork_end(0);
4035 }
4036 }
4037 return ret;
4038 }
4039
4040 /* warning: doesn't handle Linux-specific flags... */
4041 static int target_to_host_fcntl_cmd(int cmd)
4042 {
4043 switch(cmd) {
4044 case TARGET_F_DUPFD:
4045 case TARGET_F_GETFD:
4046 case TARGET_F_SETFD:
4047 case TARGET_F_GETFL:
4048 case TARGET_F_SETFL:
4049 return cmd;
4050 case TARGET_F_GETLK:
4051 return F_GETLK;
4052 case TARGET_F_SETLK:
4053 return F_SETLK;
4054 case TARGET_F_SETLKW:
4055 return F_SETLKW;
4056 case TARGET_F_GETOWN:
4057 return F_GETOWN;
4058 case TARGET_F_SETOWN:
4059 return F_SETOWN;
4060 case TARGET_F_GETSIG:
4061 return F_GETSIG;
4062 case TARGET_F_SETSIG:
4063 return F_SETSIG;
4064 #if TARGET_ABI_BITS == 32
4065 case TARGET_F_GETLK64:
4066 return F_GETLK64;
4067 case TARGET_F_SETLK64:
4068 return F_SETLK64;
4069 case TARGET_F_SETLKW64:
4070 return F_SETLKW64;
4071 #endif
4072 case TARGET_F_SETLEASE:
4073 return F_SETLEASE;
4074 case TARGET_F_GETLEASE:
4075 return F_GETLEASE;
4076 #ifdef F_DUPFD_CLOEXEC
4077 case TARGET_F_DUPFD_CLOEXEC:
4078 return F_DUPFD_CLOEXEC;
4079 #endif
4080 case TARGET_F_NOTIFY:
4081 return F_NOTIFY;
4082 default:
4083 return -TARGET_EINVAL;
4084 }
4085 return -TARGET_EINVAL;
4086 }
4087
4088 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4089 {
4090 struct flock fl;
4091 struct target_flock *target_fl;
4092 struct flock64 fl64;
4093 struct target_flock64 *target_fl64;
4094 abi_long ret;
4095 int host_cmd = target_to_host_fcntl_cmd(cmd);
4096
4097 if (host_cmd == -TARGET_EINVAL)
4098 return host_cmd;
4099
4100 switch(cmd) {
4101 case TARGET_F_GETLK:
4102 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4103 return -TARGET_EFAULT;
4104 fl.l_type = tswap16(target_fl->l_type);
4105 fl.l_whence = tswap16(target_fl->l_whence);
4106 fl.l_start = tswapl(target_fl->l_start);
4107 fl.l_len = tswapl(target_fl->l_len);
4108 fl.l_pid = tswap32(target_fl->l_pid);
4109 unlock_user_struct(target_fl, arg, 0);
4110 ret = get_errno(fcntl(fd, host_cmd, &fl));
4111 if (ret == 0) {
4112 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4113 return -TARGET_EFAULT;
4114 target_fl->l_type = tswap16(fl.l_type);
4115 target_fl->l_whence = tswap16(fl.l_whence);
4116 target_fl->l_start = tswapl(fl.l_start);
4117 target_fl->l_len = tswapl(fl.l_len);
4118 target_fl->l_pid = tswap32(fl.l_pid);
4119 unlock_user_struct(target_fl, arg, 1);
4120 }
4121 break;
4122
4123 case TARGET_F_SETLK:
4124 case TARGET_F_SETLKW:
4125 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4126 return -TARGET_EFAULT;
4127 fl.l_type = tswap16(target_fl->l_type);
4128 fl.l_whence = tswap16(target_fl->l_whence);
4129 fl.l_start = tswapl(target_fl->l_start);
4130 fl.l_len = tswapl(target_fl->l_len);
4131 fl.l_pid = tswap32(target_fl->l_pid);
4132 unlock_user_struct(target_fl, arg, 0);
4133 ret = get_errno(fcntl(fd, host_cmd, &fl));
4134 break;
4135
4136 case TARGET_F_GETLK64:
4137 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4138 return -TARGET_EFAULT;
4139 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4140 fl64.l_whence = tswap16(target_fl64->l_whence);
4141 fl64.l_start = tswapl(target_fl64->l_start);
4142 fl64.l_len = tswapl(target_fl64->l_len);
4143 fl64.l_pid = tswap32(target_fl64->l_pid);
4144 unlock_user_struct(target_fl64, arg, 0);
4145 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4146 if (ret == 0) {
4147 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4148 return -TARGET_EFAULT;
4149 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4150 target_fl64->l_whence = tswap16(fl64.l_whence);
4151 target_fl64->l_start = tswapl(fl64.l_start);
4152 target_fl64->l_len = tswapl(fl64.l_len);
4153 target_fl64->l_pid = tswap32(fl64.l_pid);
4154 unlock_user_struct(target_fl64, arg, 1);
4155 }
4156 break;
4157 case TARGET_F_SETLK64:
4158 case TARGET_F_SETLKW64:
4159 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4160 return -TARGET_EFAULT;
4161 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4162 fl64.l_whence = tswap16(target_fl64->l_whence);
4163 fl64.l_start = tswapl(target_fl64->l_start);
4164 fl64.l_len = tswapl(target_fl64->l_len);
4165 fl64.l_pid = tswap32(target_fl64->l_pid);
4166 unlock_user_struct(target_fl64, arg, 0);
4167 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4168 break;
4169
4170 case TARGET_F_GETFL:
4171 ret = get_errno(fcntl(fd, host_cmd, arg));
4172 if (ret >= 0) {
4173 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4174 }
4175 break;
4176
4177 case TARGET_F_SETFL:
4178 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4179 break;
4180
4181 case TARGET_F_SETOWN:
4182 case TARGET_F_GETOWN:
4183 case TARGET_F_SETSIG:
4184 case TARGET_F_GETSIG:
4185 case TARGET_F_SETLEASE:
4186 case TARGET_F_GETLEASE:
4187 ret = get_errno(fcntl(fd, host_cmd, arg));
4188 break;
4189
4190 default:
4191 ret = get_errno(fcntl(fd, cmd, arg));
4192 break;
4193 }
4194 return ret;
4195 }
4196
4197 #ifdef USE_UID16
4198
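/* For targets with 16-bit uid_t/gid_t syscalls: host IDs that do not fit in
   16 bits are reported as 65534 (the kernel's conventional "overflow" ID),
   and a 16-bit -1 coming from the guest is widened back to the host's -1. */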
4199 static inline int high2lowuid(int uid)
4200 {
4201 if (uid > 65535)
4202 return 65534;
4203 else
4204 return uid;
4205 }
4206
4207 static inline int high2lowgid(int gid)
4208 {
4209 if (gid > 65535)
4210 return 65534;
4211 else
4212 return gid;
4213 }
4214
4215 static inline int low2highuid(int uid)
4216 {
4217 if ((int16_t)uid == -1)
4218 return -1;
4219 else
4220 return uid;
4221 }
4222
4223 static inline int low2highgid(int gid)
4224 {
4225 if ((int16_t)gid == -1)
4226 return -1;
4227 else
4228 return gid;
4229 }
4230 static inline int tswapid(int id)
4231 {
4232 return tswap16(id);
4233 }
4234 #else /* !USE_UID16 */
4235 static inline int high2lowuid(int uid)
4236 {
4237 return uid;
4238 }
4239 static inline int high2lowgid(int gid)
4240 {
4241 return gid;
4242 }
4243 static inline int low2highuid(int uid)
4244 {
4245 return uid;
4246 }
4247 static inline int low2highgid(int gid)
4248 {
4249 return gid;
4250 }
4251 static inline int tswapid(int id)
4252 {
4253 return tswap32(id);
4254 }
4255 #endif /* USE_UID16 */
4256
4257 void syscall_init(void)
4258 {
4259 IOCTLEntry *ie;
4260 const argtype *arg_type;
4261 int size;
4262 int i;
4263
4264 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4265 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4266 #include "syscall_types.h"
4267 #undef STRUCT
4268 #undef STRUCT_SPECIAL
4269
4270 /* we patch the ioctl size if necessary. We rely on the fact that
4271 no ioctl has all the bits at '1' in the size field */
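/* (In the usual asm-generic encoding an ioctl request packs
   dir:2 | size:14 | type:8 | nr:8, so the size field patched here sits at
   bits 16..29; a few architectures use a slightly different split, hence
   the TARGET_IOC_* macros.) */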
4272 ie = ioctl_entries;
4273 while (ie->target_cmd != 0) {
4274 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4275 TARGET_IOC_SIZEMASK) {
4276 arg_type = ie->arg_type;
4277 if (arg_type[0] != TYPE_PTR) {
4278 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4279 ie->target_cmd);
4280 exit(1);
4281 }
4282 arg_type++;
4283 size = thunk_type_size(arg_type, 0);
4284 ie->target_cmd = (ie->target_cmd &
4285 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4286 (size << TARGET_IOC_SIZESHIFT);
4287 }
4288
4289 /* Build target_to_host_errno_table[] table from
4290 * host_to_target_errno_table[]. */
4291 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4292 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4293
4294 /* automatic consistency check if same arch */
4295 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4296 (defined(__x86_64__) && defined(TARGET_X86_64))
4297 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4298 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4299 ie->name, ie->target_cmd, ie->host_cmd);
4300 }
4301 #endif
4302 ie++;
4303 }
4304 }
4305
4306 #if TARGET_ABI_BITS == 32
4307 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4308 {
4309 #ifdef TARGET_WORDS_BIGENDIAN
4310 return ((uint64_t)word0 << 32) | word1;
4311 #else
4312 return ((uint64_t)word1 << 32) | word0;
4313 #endif
4314 }
4315 #else /* TARGET_ABI_BITS == 32 */
4316 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4317 {
4318 return word0;
4319 }
4320 #endif /* TARGET_ABI_BITS != 32 */
4321
4322 #ifdef TARGET_NR_truncate64
4323 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4324 abi_long arg2,
4325 abi_long arg3,
4326 abi_long arg4)
4327 {
4328 #ifdef TARGET_ARM
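    /* On ARM EABI, 64-bit syscall arguments are passed in an even/odd
       register pair, so a padding word precedes the offset; skip it by
       shifting the arguments down. */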
4329 if (((CPUARMState *)cpu_env)->eabi)
4330 {
4331 arg2 = arg3;
4332 arg3 = arg4;
4333 }
4334 #endif
4335 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4336 }
4337 #endif
4338
4339 #ifdef TARGET_NR_ftruncate64
4340 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4341 abi_long arg2,
4342 abi_long arg3,
4343 abi_long arg4)
4344 {
4345 #ifdef TARGET_ARM
4346 if (((CPUARMState *)cpu_env)->eabi)
4347 {
4348 arg2 = arg3;
4349 arg3 = arg4;
4350 }
4351 #endif
4352 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4353 }
4354 #endif
4355
4356 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4357 abi_ulong target_addr)
4358 {
4359 struct target_timespec *target_ts;
4360
4361 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4362 return -TARGET_EFAULT;
4363 host_ts->tv_sec = tswapl(target_ts->tv_sec);
4364 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
4365 unlock_user_struct(target_ts, target_addr, 0);
4366 return 0;
4367 }
4368
4369 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4370 struct timespec *host_ts)
4371 {
4372 struct target_timespec *target_ts;
4373
4374 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4375 return -TARGET_EFAULT;
4376 target_ts->tv_sec = tswapl(host_ts->tv_sec);
4377 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
4378 unlock_user_struct(target_ts, target_addr, 1);
4379 return 0;
4380 }
4381
4382 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4383 static inline abi_long host_to_target_stat64(void *cpu_env,
4384 abi_ulong target_addr,
4385 struct stat *host_st)
4386 {
4387 #ifdef TARGET_ARM
4388 if (((CPUARMState *)cpu_env)->eabi) {
4389 struct target_eabi_stat64 *target_st;
4390
4391 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4392 return -TARGET_EFAULT;
4393 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4394 __put_user(host_st->st_dev, &target_st->st_dev);
4395 __put_user(host_st->st_ino, &target_st->st_ino);
4396 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4397 __put_user(host_st->st_ino, &target_st->__st_ino);
4398 #endif
4399 __put_user(host_st->st_mode, &target_st->st_mode);
4400 __put_user(host_st->st_nlink, &target_st->st_nlink);
4401 __put_user(host_st->st_uid, &target_st->st_uid);
4402 __put_user(host_st->st_gid, &target_st->st_gid);
4403 __put_user(host_st->st_rdev, &target_st->st_rdev);
4404 __put_user(host_st->st_size, &target_st->st_size);
4405 __put_user(host_st->st_blksize, &target_st->st_blksize);
4406 __put_user(host_st->st_blocks, &target_st->st_blocks);
4407 __put_user(host_st->st_atime, &target_st->target_st_atime);
4408 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4409 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4410 unlock_user_struct(target_st, target_addr, 1);
4411 } else
4412 #endif
4413 {
4414 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4415 struct target_stat *target_st;
4416 #else
4417 struct target_stat64 *target_st;
4418 #endif
4419
4420 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4421 return -TARGET_EFAULT;
4422 memset(target_st, 0, sizeof(*target_st));
4423 __put_user(host_st->st_dev, &target_st->st_dev);
4424 __put_user(host_st->st_ino, &target_st->st_ino);
4425 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4426 __put_user(host_st->st_ino, &target_st->__st_ino);
4427 #endif
4428 __put_user(host_st->st_mode, &target_st->st_mode);
4429 __put_user(host_st->st_nlink, &target_st->st_nlink);
4430 __put_user(host_st->st_uid, &target_st->st_uid);
4431 __put_user(host_st->st_gid, &target_st->st_gid);
4432 __put_user(host_st->st_rdev, &target_st->st_rdev);
4433 /* XXX: better use of kernel struct */
4434 __put_user(host_st->st_size, &target_st->st_size);
4435 __put_user(host_st->st_blksize, &target_st->st_blksize);
4436 __put_user(host_st->st_blocks, &target_st->st_blocks);
4437 __put_user(host_st->st_atime, &target_st->target_st_atime);
4438 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4439 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4440 unlock_user_struct(target_st, target_addr, 1);
4441 }
4442
4443 return 0;
4444 }
4445 #endif
4446
4447 #if defined(CONFIG_USE_NPTL)
4448 /* ??? Using host futex calls even when target atomic operations
4449 are not really atomic probably breaks things. However, implementing
4450 futexes locally would make futexes shared between multiple processes
4451 tricky; they would probably be useless anyway, because guest atomic
4452 operations won't work either. */
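/* Guest futex words are handed to the host kernel through g2h(), so the host
   futex operates directly on guest memory; only the compare values (val for
   FUTEX_WAIT, val3 for FUTEX_CMP_REQUEUE) and the timeout need converting to
   host form first. */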
4453 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4454 target_ulong uaddr2, int val3)
4455 {
4456 struct timespec ts, *pts;
4457 int base_op;
4458
4459 /* ??? We assume FUTEX_* constants are the same on both host
4460 and target. */
4461 #ifdef FUTEX_CMD_MASK
4462 base_op = op & FUTEX_CMD_MASK;
4463 #else
4464 base_op = op;
4465 #endif
4466 switch (base_op) {
4467 case FUTEX_WAIT:
4468 if (timeout) {
4469 pts = &ts;
4470 target_to_host_timespec(pts, timeout);
4471 } else {
4472 pts = NULL;
4473 }
4474 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4475 pts, NULL, 0));
4476 case FUTEX_WAKE:
4477 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4478 case FUTEX_FD:
4479 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4480 case FUTEX_REQUEUE:
4481 case FUTEX_CMP_REQUEUE:
4482 case FUTEX_WAKE_OP:
4483 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4484 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4485 But the prototype takes a `struct timespec *'; insert casts
4486 to satisfy the compiler. We do not need to tswap TIMEOUT
4487 since it's not compared to guest memory. */
4488 pts = (struct timespec *)(uintptr_t) timeout;
4489 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4490 g2h(uaddr2),
4491 (base_op == FUTEX_CMP_REQUEUE
4492 ? tswap32(val3)
4493 : val3)));
4494 default:
4495 return -TARGET_ENOSYS;
4496 }
4497 }
4498 #endif
4499
4500 /* Map host to target signal numbers for the wait family of syscalls.
4501 Assume all other status bits are the same. */
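/* In the status word the terminating signal lives in bits 0..6 and a stop
   signal in bits 8..15, so only those fields are remapped; exit codes and the
   core-dump bit pass through unchanged. */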
4502 static int host_to_target_waitstatus(int status)
4503 {
4504 if (WIFSIGNALED(status)) {
4505 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4506 }
4507 if (WIFSTOPPED(status)) {
4508 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4509 | (status & 0xff);
4510 }
4511 return status;
4512 }
4513
4514 int get_osversion(void)
4515 {
4516 static int osversion;
4517 struct new_utsname buf;
4518 const char *s;
4519 int i, n, tmp;
4520 if (osversion)
4521 return osversion;
4522 if (qemu_uname_release && *qemu_uname_release) {
4523 s = qemu_uname_release;
4524 } else {
4525 if (sys_uname(&buf))
4526 return 0;
4527 s = buf.release;
4528 }
4529 tmp = 0;
4530 for (i = 0; i < 3; i++) {
4531 n = 0;
4532 while (*s >= '0' && *s <= '9') {
4533 n *= 10;
4534 n += *s - '0';
4535 s++;
4536 }
4537 tmp = (tmp << 8) + n;
4538 if (*s == '.')
4539 s++;
4540 }
4541 osversion = tmp;
4542 return osversion;
4543 }
4544
4545 /* do_syscall() should always have a single exit point at the end so
4546 that actions, such as logging of syscall results, can be performed.
4547 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4548 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4549 abi_long arg2, abi_long arg3, abi_long arg4,
4550 abi_long arg5, abi_long arg6, abi_long arg7,
4551 abi_long arg8)
4552 {
4553 abi_long ret;
4554 struct stat st;
4555 struct statfs stfs;
4556 void *p;
4557
4558 #ifdef DEBUG
4559 gemu_log("syscall %d", num);
4560 #endif
4561 if(do_strace)
4562 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4563
4564 switch(num) {
4565 case TARGET_NR_exit:
4566 #ifdef CONFIG_USE_NPTL
4567 /* In old applications this may be used to implement _exit(2).
4568 However in threaded applications it is used for thread termination,
4569 and _exit_group is used for application termination.
4570 Do thread termination if we have more than one thread. */
4571 /* FIXME: This probably breaks if a signal arrives. We should probably
4572 be disabling signals. */
4573 if (first_cpu->next_cpu) {
4574 TaskState *ts;
4575 CPUState **lastp;
4576 CPUState *p;
4577
4578 cpu_list_lock();
4579 lastp = &first_cpu;
4580 p = first_cpu;
4581 while (p && p != (CPUState *)cpu_env) {
4582 lastp = &p->next_cpu;
4583 p = p->next_cpu;
4584 }
4585 /* If we didn't find the CPU for this thread then something is
4586 horribly wrong. */
4587 if (!p)
4588 abort();
4589 /* Remove the CPU from the list. */
4590 *lastp = p->next_cpu;
4591 cpu_list_unlock();
4592 ts = ((CPUState *)cpu_env)->opaque;
4593 if (ts->child_tidptr) {
4594 put_user_u32(0, ts->child_tidptr);
4595 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4596 NULL, NULL, 0);
4597 }
4598 thread_env = NULL;
4599 qemu_free(cpu_env);
4600 qemu_free(ts);
4601 pthread_exit(NULL);
4602 }
4603 #endif
4604 #ifdef TARGET_GPROF
4605 _mcleanup();
4606 #endif
4607 gdb_exit(cpu_env, arg1);
4608 _exit(arg1);
4609 ret = 0; /* avoid warning */
4610 break;
4611 case TARGET_NR_read:
4612 if (arg3 == 0)
4613 ret = 0;
4614 else {
4615 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4616 goto efault;
4617 ret = get_errno(read(arg1, p, arg3));
4618 unlock_user(p, arg2, ret);
4619 }
4620 break;
4621 case TARGET_NR_write:
4622 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4623 goto efault;
4624 ret = get_errno(write(arg1, p, arg3));
4625 unlock_user(p, arg2, 0);
4626 break;
4627 case TARGET_NR_open:
4628 if (!(p = lock_user_string(arg1)))
4629 goto efault;
4630 ret = get_errno(open(path(p),
4631 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4632 arg3));
4633 unlock_user(p, arg1, 0);
4634 break;
4635 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4636 case TARGET_NR_openat:
4637 if (!(p = lock_user_string(arg2)))
4638 goto efault;
4639 ret = get_errno(sys_openat(arg1,
4640 path(p),
4641 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4642 arg4));
4643 unlock_user(p, arg2, 0);
4644 break;
4645 #endif
4646 case TARGET_NR_close:
4647 ret = get_errno(close(arg1));
4648 break;
4649 case TARGET_NR_brk:
4650 ret = do_brk(arg1);
4651 break;
4652 case TARGET_NR_fork:
4653 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4654 break;
4655 #ifdef TARGET_NR_waitpid
4656 case TARGET_NR_waitpid:
4657 {
4658 int status;
4659 ret = get_errno(waitpid(arg1, &status, arg3));
4660 if (!is_error(ret) && arg2
4661 && put_user_s32(host_to_target_waitstatus(status), arg2))
4662 goto efault;
4663 }
4664 break;
4665 #endif
4666 #ifdef TARGET_NR_waitid
4667 case TARGET_NR_waitid:
4668 {
4669 siginfo_t info;
4670 info.si_pid = 0;
4671 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4672 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4673 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4674 goto efault;
4675 host_to_target_siginfo(p, &info);
4676 unlock_user(p, arg3, sizeof(target_siginfo_t));
4677 }
4678 }
4679 break;
4680 #endif
4681 #ifdef TARGET_NR_creat /* not on alpha */
4682 case TARGET_NR_creat:
4683 if (!(p = lock_user_string(arg1)))
4684 goto efault;
4685 ret = get_errno(creat(p, arg2));
4686 unlock_user(p, arg1, 0);
4687 break;
4688 #endif
4689 case TARGET_NR_link:
4690 {
4691 void * p2;
4692 p = lock_user_string(arg1);
4693 p2 = lock_user_string(arg2);
4694 if (!p || !p2)
4695 ret = -TARGET_EFAULT;
4696 else
4697 ret = get_errno(link(p, p2));
4698 unlock_user(p2, arg2, 0);
4699 unlock_user(p, arg1, 0);
4700 }
4701 break;
4702 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4703 case TARGET_NR_linkat:
4704 {
4705 void * p2 = NULL;
4706 if (!arg2 || !arg4)
4707 goto efault;
4708 p = lock_user_string(arg2);
4709 p2 = lock_user_string(arg4);
4710 if (!p || !p2)
4711 ret = -TARGET_EFAULT;
4712 else
4713 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4714 unlock_user(p, arg2, 0);
4715 unlock_user(p2, arg4, 0);
4716 }
4717 break;
4718 #endif
4719 case TARGET_NR_unlink:
4720 if (!(p = lock_user_string(arg1)))
4721 goto efault;
4722 ret = get_errno(unlink(p));
4723 unlock_user(p, arg1, 0);
4724 break;
4725 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4726 case TARGET_NR_unlinkat:
4727 if (!(p = lock_user_string(arg2)))
4728 goto efault;
4729 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4730 unlock_user(p, arg2, 0);
4731 break;
4732 #endif
4733 case TARGET_NR_execve:
4734 {
4735 char **argp, **envp;
4736 int argc, envc;
4737 abi_ulong gp;
4738 abi_ulong guest_argp;
4739 abi_ulong guest_envp;
4740 abi_ulong addr;
4741 char **q;
4742
4743 argc = 0;
4744 guest_argp = arg2;
4745 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4746 if (get_user_ual(addr, gp))
4747 goto efault;
4748 if (!addr)
4749 break;
4750 argc++;
4751 }
4752 envc = 0;
4753 guest_envp = arg3;
4754 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4755 if (get_user_ual(addr, gp))
4756 goto efault;
4757 if (!addr)
4758 break;
4759 envc++;
4760 }
4761
4762 argp = alloca((argc + 1) * sizeof(void *));
4763 envp = alloca((envc + 1) * sizeof(void *));
4764
4765 for (gp = guest_argp, q = argp; gp;
4766 gp += sizeof(abi_ulong), q++) {
4767 if (get_user_ual(addr, gp))
4768 goto execve_efault;
4769 if (!addr)
4770 break;
4771 if (!(*q = lock_user_string(addr)))
4772 goto execve_efault;
4773 }
4774 *q = NULL;
4775
4776 for (gp = guest_envp, q = envp; gp;
4777 gp += sizeof(abi_ulong), q++) {
4778 if (get_user_ual(addr, gp))
4779 goto execve_efault;
4780 if (!addr)
4781 break;
4782 if (!(*q = lock_user_string(addr)))
4783 goto execve_efault;
4784 }
4785 *q = NULL;
4786
4787 if (!(p = lock_user_string(arg1)))
4788 goto execve_efault;
4789 ret = get_errno(execve(p, argp, envp));
4790 unlock_user(p, arg1, 0);
4791
4792 goto execve_end;
4793
4794 execve_efault:
4795 ret = -TARGET_EFAULT;
4796
4797 execve_end:
4798 for (gp = guest_argp, q = argp; *q;
4799 gp += sizeof(abi_ulong), q++) {
4800 if (get_user_ual(addr, gp)
4801 || !addr)
4802 break;
4803 unlock_user(*q, addr, 0);
4804 }
4805 for (gp = guest_envp, q = envp; *q;
4806 gp += sizeof(abi_ulong), q++) {
4807 if (get_user_ual(addr, gp)
4808 || !addr)
4809 break;
4810 unlock_user(*q, addr, 0);
4811 }
4812 }
4813 break;
4814 case TARGET_NR_chdir:
4815 if (!(p = lock_user_string(arg1)))
4816 goto efault;
4817 ret = get_errno(chdir(p));
4818 unlock_user(p, arg1, 0);
4819 break;
4820 #ifdef TARGET_NR_time
4821 case TARGET_NR_time:
4822 {
4823 time_t host_time;
4824 ret = get_errno(time(&host_time));
4825 if (!is_error(ret)
4826 && arg1
4827 && put_user_sal(host_time, arg1))
4828 goto efault;
4829 }
4830 break;
4831 #endif
4832 case TARGET_NR_mknod:
4833 if (!(p = lock_user_string(arg1)))
4834 goto efault;
4835 ret = get_errno(mknod(p, arg2, arg3));
4836 unlock_user(p, arg1, 0);
4837 break;
4838 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4839 case TARGET_NR_mknodat:
4840 if (!(p = lock_user_string(arg2)))
4841 goto efault;
4842 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4843 unlock_user(p, arg2, 0);
4844 break;
4845 #endif
4846 case TARGET_NR_chmod:
4847 if (!(p = lock_user_string(arg1)))
4848 goto efault;
4849 ret = get_errno(chmod(p, arg2));
4850 unlock_user(p, arg1, 0);
4851 break;
4852 #ifdef TARGET_NR_break
4853 case TARGET_NR_break:
4854 goto unimplemented;
4855 #endif
4856 #ifdef TARGET_NR_oldstat
4857 case TARGET_NR_oldstat:
4858 goto unimplemented;
4859 #endif
4860 case TARGET_NR_lseek:
4861 ret = get_errno(lseek(arg1, arg2, arg3));
4862 break;
4863 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4864 /* Alpha specific */
4865 case TARGET_NR_getxpid:
4866 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
4867 ret = get_errno(getpid());
4868 break;
4869 #endif
4870 #ifdef TARGET_NR_getpid
4871 case TARGET_NR_getpid:
4872 ret = get_errno(getpid());
4873 break;
4874 #endif
4875 case TARGET_NR_mount:
4876 {
4877 /* need to look at the data field */
4878 void *p2, *p3;
4879 p = lock_user_string(arg1);
4880 p2 = lock_user_string(arg2);
4881 p3 = lock_user_string(arg3);
4882 if (!p || !p2 || !p3)
4883 ret = -TARGET_EFAULT;
4884 else {
4885 /* FIXME - arg5 should be locked, but it isn't clear how to
4886 * do that since it's not guaranteed to be a NULL-terminated
4887 * string.
4888 */
4889 if ( ! arg5 )
4890 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
4891 else
4892 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4893 }
4894 unlock_user(p, arg1, 0);
4895 unlock_user(p2, arg2, 0);
4896 unlock_user(p3, arg3, 0);
4897 break;
4898 }
4899 #ifdef TARGET_NR_umount
4900 case TARGET_NR_umount:
4901 if (!(p = lock_user_string(arg1)))
4902 goto efault;
4903 ret = get_errno(umount(p));
4904 unlock_user(p, arg1, 0);
4905 break;
4906 #endif
4907 #ifdef TARGET_NR_stime /* not on alpha */
4908 case TARGET_NR_stime:
4909 {
4910 time_t host_time;
4911 if (get_user_sal(host_time, arg1))
4912 goto efault;
4913 ret = get_errno(stime(&host_time));
4914 }
4915 break;
4916 #endif
4917 case TARGET_NR_ptrace:
4918 goto unimplemented;
4919 #ifdef TARGET_NR_alarm /* not on alpha */
4920 case TARGET_NR_alarm:
4921 ret = alarm(arg1);
4922 break;
4923 #endif
4924 #ifdef TARGET_NR_oldfstat
4925 case TARGET_NR_oldfstat:
4926 goto unimplemented;
4927 #endif
4928 #ifdef TARGET_NR_pause /* not on alpha */
4929 case TARGET_NR_pause:
4930 ret = get_errno(pause());
4931 break;
4932 #endif
4933 #ifdef TARGET_NR_utime
4934 case TARGET_NR_utime:
4935 {
4936 struct utimbuf tbuf, *host_tbuf;
4937 struct target_utimbuf *target_tbuf;
4938 if (arg2) {
4939 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4940 goto efault;
4941 tbuf.actime = tswapl(target_tbuf->actime);
4942 tbuf.modtime = tswapl(target_tbuf->modtime);
4943 unlock_user_struct(target_tbuf, arg2, 0);
4944 host_tbuf = &tbuf;
4945 } else {
4946 host_tbuf = NULL;
4947 }
4948 if (!(p = lock_user_string(arg1)))
4949 goto efault;
4950 ret = get_errno(utime(p, host_tbuf));
4951 unlock_user(p, arg1, 0);
4952 }
4953 break;
4954 #endif
4955 case TARGET_NR_utimes:
4956 {
4957 struct timeval *tvp, tv[2];
4958 if (arg2) {
4959 if (copy_from_user_timeval(&tv[0], arg2)
4960 || copy_from_user_timeval(&tv[1],
4961 arg2 + sizeof(struct target_timeval)))
4962 goto efault;
4963 tvp = tv;
4964 } else {
4965 tvp = NULL;
4966 }
4967 if (!(p = lock_user_string(arg1)))
4968 goto efault;
4969 ret = get_errno(utimes(p, tvp));
4970 unlock_user(p, arg1, 0);
4971 }
4972 break;
4973 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4974 case TARGET_NR_futimesat:
4975 {
4976 struct timeval *tvp, tv[2];
4977 if (arg3) {
4978 if (copy_from_user_timeval(&tv[0], arg3)
4979 || copy_from_user_timeval(&tv[1],
4980 arg3 + sizeof(struct target_timeval)))
4981 goto efault;
4982 tvp = tv;
4983 } else {
4984 tvp = NULL;
4985 }
4986 if (!(p = lock_user_string(arg2)))
4987 goto efault;
4988 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4989 unlock_user(p, arg2, 0);
4990 }
4991 break;
4992 #endif
4993 #ifdef TARGET_NR_stty
4994 case TARGET_NR_stty:
4995 goto unimplemented;
4996 #endif
4997 #ifdef TARGET_NR_gtty
4998 case TARGET_NR_gtty:
4999 goto unimplemented;
5000 #endif
5001 case TARGET_NR_access:
5002 if (!(p = lock_user_string(arg1)))
5003 goto efault;
5004 ret = get_errno(access(path(p), arg2));
5005 unlock_user(p, arg1, 0);
5006 break;
5007 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5008 case TARGET_NR_faccessat:
5009 if (!(p = lock_user_string(arg2)))
5010 goto efault;
5011 ret = get_errno(sys_faccessat(arg1, p, arg3));
5012 unlock_user(p, arg2, 0);
5013 break;
5014 #endif
5015 #ifdef TARGET_NR_nice /* not on alpha */
5016 case TARGET_NR_nice:
5017 ret = get_errno(nice(arg1));
5018 break;
5019 #endif
5020 #ifdef TARGET_NR_ftime
5021 case TARGET_NR_ftime:
5022 goto unimplemented;
5023 #endif
5024 case TARGET_NR_sync:
5025 sync();
5026 ret = 0;
5027 break;
5028 case TARGET_NR_kill:
5029 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5030 break;
5031 case TARGET_NR_rename:
5032 {
5033 void *p2;
5034 p = lock_user_string(arg1);
5035 p2 = lock_user_string(arg2);
5036 if (!p || !p2)
5037 ret = -TARGET_EFAULT;
5038 else
5039 ret = get_errno(rename(p, p2));
5040 unlock_user(p2, arg2, 0);
5041 unlock_user(p, arg1, 0);
5042 }
5043 break;
5044 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5045 case TARGET_NR_renameat:
5046 {
5047 void *p2;
5048 p = lock_user_string(arg2);
5049 p2 = lock_user_string(arg4);
5050 if (!p || !p2)
5051 ret = -TARGET_EFAULT;
5052 else
5053 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5054 unlock_user(p2, arg4, 0);
5055 unlock_user(p, arg2, 0);
5056 }
5057 break;
5058 #endif
5059 case TARGET_NR_mkdir:
5060 if (!(p = lock_user_string(arg1)))
5061 goto efault;
5062 ret = get_errno(mkdir(p, arg2));
5063 unlock_user(p, arg1, 0);
5064 break;
5065 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5066 case TARGET_NR_mkdirat:
5067 if (!(p = lock_user_string(arg2)))
5068 goto efault;
5069 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5070 unlock_user(p, arg2, 0);
5071 break;
5072 #endif
5073 case TARGET_NR_rmdir:
5074 if (!(p = lock_user_string(arg1)))
5075 goto efault;
5076 ret = get_errno(rmdir(p));
5077 unlock_user(p, arg1, 0);
5078 break;
5079 case TARGET_NR_dup:
5080 ret = get_errno(dup(arg1));
5081 break;
5082 case TARGET_NR_pipe:
5083 ret = do_pipe(cpu_env, arg1, 0, 0);
5084 break;
5085 #ifdef TARGET_NR_pipe2
5086 case TARGET_NR_pipe2:
5087 ret = do_pipe(cpu_env, arg1, arg2, 1);
5088 break;
5089 #endif
5090 case TARGET_NR_times:
5091 {
5092 struct target_tms *tmsp;
5093 struct tms tms;
5094 ret = get_errno(times(&tms));
5095 if (arg1) {
5096 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5097 if (!tmsp)
5098 goto efault;
5099 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
5100 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
5101 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
5102 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
5103 }
5104 if (!is_error(ret))
5105 ret = host_to_target_clock_t(ret);
5106 }
5107 break;
5108 #ifdef TARGET_NR_prof
5109 case TARGET_NR_prof:
5110 goto unimplemented;
5111 #endif
5112 #ifdef TARGET_NR_signal
5113 case TARGET_NR_signal:
5114 goto unimplemented;
5115 #endif
5116 case TARGET_NR_acct:
5117 if (arg1 == 0) {
5118 ret = get_errno(acct(NULL));
5119 } else {
5120 if (!(p = lock_user_string(arg1)))
5121 goto efault;
5122 ret = get_errno(acct(path(p)));
5123 unlock_user(p, arg1, 0);
5124 }
5125 break;
5126 #ifdef TARGET_NR_umount2 /* not on alpha */
5127 case TARGET_NR_umount2:
5128 if (!(p = lock_user_string(arg1)))
5129 goto efault;
5130 ret = get_errno(umount2(p, arg2));
5131 unlock_user(p, arg1, 0);
5132 break;
5133 #endif
5134 #ifdef TARGET_NR_lock
5135 case TARGET_NR_lock:
5136 goto unimplemented;
5137 #endif
5138 case TARGET_NR_ioctl:
5139 ret = do_ioctl(arg1, arg2, arg3);
5140 break;
5141 case TARGET_NR_fcntl:
5142 ret = do_fcntl(arg1, arg2, arg3);
5143 break;
5144 #ifdef TARGET_NR_mpx
5145 case TARGET_NR_mpx:
5146 goto unimplemented;
5147 #endif
5148 case TARGET_NR_setpgid:
5149 ret = get_errno(setpgid(arg1, arg2));
5150 break;
5151 #ifdef TARGET_NR_ulimit
5152 case TARGET_NR_ulimit:
5153 goto unimplemented;
5154 #endif
5155 #ifdef TARGET_NR_oldolduname
5156 case TARGET_NR_oldolduname:
5157 goto unimplemented;
5158 #endif
5159 case TARGET_NR_umask:
5160 ret = get_errno(umask(arg1));
5161 break;
5162 case TARGET_NR_chroot:
5163 if (!(p = lock_user_string(arg1)))
5164 goto efault;
5165 ret = get_errno(chroot(p));
5166 unlock_user(p, arg1, 0);
5167 break;
5168 case TARGET_NR_ustat:
5169 goto unimplemented;
5170 case TARGET_NR_dup2:
5171 ret = get_errno(dup2(arg1, arg2));
5172 break;
5173 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5174 case TARGET_NR_dup3:
5175 ret = get_errno(dup3(arg1, arg2, arg3));
5176 break;
5177 #endif
5178 #ifdef TARGET_NR_getppid /* not on alpha */
5179 case TARGET_NR_getppid:
5180 ret = get_errno(getppid());
5181 break;
5182 #endif
5183 case TARGET_NR_getpgrp:
5184 ret = get_errno(getpgrp());
5185 break;
5186 case TARGET_NR_setsid:
5187 ret = get_errno(setsid());
5188 break;
5189 #ifdef TARGET_NR_sigaction
5190 case TARGET_NR_sigaction:
5191 {
5192 #if defined(TARGET_ALPHA)
5193 struct target_sigaction act, oact, *pact = 0;
5194 struct target_old_sigaction *old_act;
5195 if (arg2) {
5196 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5197 goto efault;
5198 act._sa_handler = old_act->_sa_handler;
5199 target_siginitset(&act.sa_mask, old_act->sa_mask);
5200 act.sa_flags = old_act->sa_flags;
5201 act.sa_restorer = 0;
5202 unlock_user_struct(old_act, arg2, 0);
5203 pact = &act;
5204 }
5205 ret = get_errno(do_sigaction(arg1, pact, &oact));
5206 if (!is_error(ret) && arg3) {
5207 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5208 goto efault;
5209 old_act->_sa_handler = oact._sa_handler;
5210 old_act->sa_mask = oact.sa_mask.sig[0];
5211 old_act->sa_flags = oact.sa_flags;
5212 unlock_user_struct(old_act, arg3, 1);
5213 }
5214 #elif defined(TARGET_MIPS)
5215 struct target_sigaction act, oact, *pact, *old_act;
5216
5217 if (arg2) {
5218 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5219 goto efault;
5220 act._sa_handler = old_act->_sa_handler;
5221 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5222 act.sa_flags = old_act->sa_flags;
5223 unlock_user_struct(old_act, arg2, 0);
5224 pact = &act;
5225 } else {
5226 pact = NULL;
5227 }
5228
5229 ret = get_errno(do_sigaction(arg1, pact, &oact));
5230
5231 if (!is_error(ret) && arg3) {
5232 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5233 goto efault;
5234 old_act->_sa_handler = oact._sa_handler;
5235 old_act->sa_flags = oact.sa_flags;
5236 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5237 old_act->sa_mask.sig[1] = 0;
5238 old_act->sa_mask.sig[2] = 0;
5239 old_act->sa_mask.sig[3] = 0;
5240 unlock_user_struct(old_act, arg3, 1);
5241 }
5242 #else
5243 struct target_old_sigaction *old_act;
5244 struct target_sigaction act, oact, *pact;
5245 if (arg2) {
5246 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5247 goto efault;
5248 act._sa_handler = old_act->_sa_handler;
5249 target_siginitset(&act.sa_mask, old_act->sa_mask);
5250 act.sa_flags = old_act->sa_flags;
5251 act.sa_restorer = old_act->sa_restorer;
5252 unlock_user_struct(old_act, arg2, 0);
5253 pact = &act;
5254 } else {
5255 pact = NULL;
5256 }
5257 ret = get_errno(do_sigaction(arg1, pact, &oact));
5258 if (!is_error(ret) && arg3) {
5259 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5260 goto efault;
5261 old_act->_sa_handler = oact._sa_handler;
5262 old_act->sa_mask = oact.sa_mask.sig[0];
5263 old_act->sa_flags = oact.sa_flags;
5264 old_act->sa_restorer = oact.sa_restorer;
5265 unlock_user_struct(old_act, arg3, 1);
5266 }
5267 #endif
5268 }
5269 break;
5270 #endif
5271 case TARGET_NR_rt_sigaction:
5272 {
5273 #if defined(TARGET_ALPHA)
5274 struct target_sigaction act, oact, *pact = 0;
5275 struct target_rt_sigaction *rt_act;
5276 /* ??? arg4 == sizeof(sigset_t). */
5277 if (arg2) {
5278 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5279 goto efault;
5280 act._sa_handler = rt_act->_sa_handler;
5281 act.sa_mask = rt_act->sa_mask;
5282 act.sa_flags = rt_act->sa_flags;
5283 act.sa_restorer = arg5;
5284 unlock_user_struct(rt_act, arg2, 0);
5285 pact = &act;
5286 }
5287 ret = get_errno(do_sigaction(arg1, pact, &oact));
5288 if (!is_error(ret) && arg3) {
5289 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5290 goto efault;
5291 rt_act->_sa_handler = oact._sa_handler;
5292 rt_act->sa_mask = oact.sa_mask;
5293 rt_act->sa_flags = oact.sa_flags;
5294 unlock_user_struct(rt_act, arg3, 1);
5295 }
5296 #else
5297 struct target_sigaction *act;
5298 struct target_sigaction *oact;
5299
5300 if (arg2) {
5301 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5302 goto efault;
5303 } else
5304 act = NULL;
5305 if (arg3) {
5306 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5307 ret = -TARGET_EFAULT;
5308 goto rt_sigaction_fail;
5309 }
5310 } else
5311 oact = NULL;
5312 ret = get_errno(do_sigaction(arg1, act, oact));
5313 rt_sigaction_fail:
5314 if (act)
5315 unlock_user_struct(act, arg2, 0);
5316 if (oact)
5317 unlock_user_struct(oact, arg3, 1);
5318 #endif
5319 }
5320 break;
5321 #ifdef TARGET_NR_sgetmask /* not on alpha */
5322 case TARGET_NR_sgetmask:
5323 {
5324 sigset_t cur_set;
5325 abi_ulong target_set;
5326 sigprocmask(0, NULL, &cur_set);
5327 host_to_target_old_sigset(&target_set, &cur_set);
5328 ret = target_set;
5329 }
5330 break;
5331 #endif
5332 #ifdef TARGET_NR_ssetmask /* not on alpha */
5333 case TARGET_NR_ssetmask:
5334 {
5335 sigset_t set, oset, cur_set;
5336 abi_ulong target_set = arg1;
5337 sigprocmask(0, NULL, &cur_set);
5338 target_to_host_old_sigset(&set, &target_set);
5339 sigorset(&set, &set, &cur_set);
5340 sigprocmask(SIG_SETMASK, &set, &oset);
5341 host_to_target_old_sigset(&target_set, &oset);
5342 ret = target_set;
5343 }
5344 break;
5345 #endif
5346 #ifdef TARGET_NR_sigprocmask
5347 case TARGET_NR_sigprocmask:
5348 {
5349 #if defined(TARGET_ALPHA)
5350 sigset_t set, oldset;
5351 abi_ulong mask;
5352 int how;
5353
5354 switch (arg1) {
5355 case TARGET_SIG_BLOCK:
5356 how = SIG_BLOCK;
5357 break;
5358 case TARGET_SIG_UNBLOCK:
5359 how = SIG_UNBLOCK;
5360 break;
5361 case TARGET_SIG_SETMASK:
5362 how = SIG_SETMASK;
5363 break;
5364 default:
5365 ret = -TARGET_EINVAL;
5366 goto fail;
5367 }
5368 mask = arg2;
5369 target_to_host_old_sigset(&set, &mask);
5370
5371 ret = get_errno(sigprocmask(how, &set, &oldset));
5372
5373 if (!is_error(ret)) {
5374 host_to_target_old_sigset(&mask, &oldset);
5375 ret = mask;
5376 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5377 }
5378 #else
5379 sigset_t set, oldset, *set_ptr;
5380 int how;
5381
5382 if (arg2) {
5383 switch (arg1) {
5384 case TARGET_SIG_BLOCK:
5385 how = SIG_BLOCK;
5386 break;
5387 case TARGET_SIG_UNBLOCK:
5388 how = SIG_UNBLOCK;
5389 break;
5390 case TARGET_SIG_SETMASK:
5391 how = SIG_SETMASK;
5392 break;
5393 default:
5394 ret = -TARGET_EINVAL;
5395 goto fail;
5396 }
5397 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5398 goto efault;
5399 target_to_host_old_sigset(&set, p);
5400 unlock_user(p, arg2, 0);
5401 set_ptr = &set;
5402 } else {
5403 how = 0;
5404 set_ptr = NULL;
5405 }
5406 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5407 if (!is_error(ret) && arg3) {
5408 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5409 goto efault;
5410 host_to_target_old_sigset(p, &oldset);
5411 unlock_user(p, arg3, sizeof(target_sigset_t));
5412 }
5413 #endif
5414 }
5415 break;
5416 #endif
5417 case TARGET_NR_rt_sigprocmask:
5418 {
5419 int how = arg1;
5420 sigset_t set, oldset, *set_ptr;
5421
5422 if (arg2) {
5423 switch(how) {
5424 case TARGET_SIG_BLOCK:
5425 how = SIG_BLOCK;
5426 break;
5427 case TARGET_SIG_UNBLOCK:
5428 how = SIG_UNBLOCK;
5429 break;
5430 case TARGET_SIG_SETMASK:
5431 how = SIG_SETMASK;
5432 break;
5433 default:
5434 ret = -TARGET_EINVAL;
5435 goto fail;
5436 }
5437 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5438 goto efault;
5439 target_to_host_sigset(&set, p);
5440 unlock_user(p, arg2, 0);
5441 set_ptr = &set;
5442 } else {
5443 how = 0;
5444 set_ptr = NULL;
5445 }
5446 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5447 if (!is_error(ret) && arg3) {
5448 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5449 goto efault;
5450 host_to_target_sigset(p, &oldset);
5451 unlock_user(p, arg3, sizeof(target_sigset_t));
5452 }
5453 }
5454 break;
5455 #ifdef TARGET_NR_sigpending
5456 case TARGET_NR_sigpending:
5457 {
5458 sigset_t set;
5459 ret = get_errno(sigpending(&set));
5460 if (!is_error(ret)) {
5461 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5462 goto efault;
5463 host_to_target_old_sigset(p, &set);
5464 unlock_user(p, arg1, sizeof(target_sigset_t));
5465 }
5466 }
5467 break;
5468 #endif
5469 case TARGET_NR_rt_sigpending:
5470 {
5471 sigset_t set;
5472 ret = get_errno(sigpending(&set));
5473 if (!is_error(ret)) {
5474 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5475 goto efault;
5476 host_to_target_sigset(p, &set);
5477 unlock_user(p, arg1, sizeof(target_sigset_t));
5478 }
5479 }
5480 break;
5481 #ifdef TARGET_NR_sigsuspend
5482 case TARGET_NR_sigsuspend:
5483 {
5484 sigset_t set;
5485 #if defined(TARGET_ALPHA)
5486 abi_ulong mask = arg1;
5487 target_to_host_old_sigset(&set, &mask);
5488 #else
5489 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5490 goto efault;
5491 target_to_host_old_sigset(&set, p);
5492 unlock_user(p, arg1, 0);
5493 #endif
5494 ret = get_errno(sigsuspend(&set));
5495 }
5496 break;
5497 #endif
5498 case TARGET_NR_rt_sigsuspend:
5499 {
5500 sigset_t set;
5501 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5502 goto efault;
5503 target_to_host_sigset(&set, p);
5504 unlock_user(p, arg1, 0);
5505 ret = get_errno(sigsuspend(&set));
5506 }
5507 break;
5508 case TARGET_NR_rt_sigtimedwait:
5509 {
5510 sigset_t set;
5511 struct timespec uts, *puts;
5512 siginfo_t uinfo;
5513
5514 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5515 goto efault;
5516 target_to_host_sigset(&set, p);
5517 unlock_user(p, arg1, 0);
5518 if (arg3) {
5519 puts = &uts;
5520 target_to_host_timespec(puts, arg3);
5521 } else {
5522 puts = NULL;
5523 }
5524 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5525 if (!is_error(ret) && arg2) {
5526 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5527 goto efault;
5528 host_to_target_siginfo(p, &uinfo);
5529 unlock_user(p, arg2, sizeof(target_siginfo_t));
5530 }
5531 }
5532 break;
5533 case TARGET_NR_rt_sigqueueinfo:
5534 {
5535 siginfo_t uinfo;
5536 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
5537 goto efault;
5538 target_to_host_siginfo(&uinfo, p);
5539 unlock_user(p, arg3, 0);
5540 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5541 }
5542 break;
5543 #ifdef TARGET_NR_sigreturn
5544 case TARGET_NR_sigreturn:
5545 /* NOTE: ret is eax, so no transcoding must be done */
5546 ret = do_sigreturn(cpu_env);
5547 break;
5548 #endif
5549 case TARGET_NR_rt_sigreturn:
5550 /* NOTE: ret is eax, so no transcoding must be done */
5551 ret = do_rt_sigreturn(cpu_env);
5552 break;
5553 case TARGET_NR_sethostname:
5554 if (!(p = lock_user_string(arg1)))
5555 goto efault;
5556 ret = get_errno(sethostname(p, arg2));
5557 unlock_user(p, arg1, 0);
5558 break;
5559 case TARGET_NR_setrlimit:
5560 {
5561 int resource = arg1;
5562 struct target_rlimit *target_rlim;
5563 struct rlimit rlim;
5564 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5565 goto efault;
5566 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5567 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5568 unlock_user_struct(target_rlim, arg2, 0);
5569 ret = get_errno(setrlimit(resource, &rlim));
5570 }
5571 break;
5572 case TARGET_NR_getrlimit:
5573 {
5574 int resource = arg1;
5575 struct target_rlimit *target_rlim;
5576 struct rlimit rlim;
5577
5578 ret = get_errno(getrlimit(resource, &rlim));
5579 if (!is_error(ret)) {
5580 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5581 goto efault;
5582 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5583 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5584 unlock_user_struct(target_rlim, arg2, 1);
5585 }
5586 }
5587 break;
5588 case TARGET_NR_getrusage:
5589 {
5590 struct rusage rusage;
5591 ret = get_errno(getrusage(arg1, &rusage));
5592 if (!is_error(ret)) {
5593 host_to_target_rusage(arg2, &rusage);
5594 }
5595 }
5596 break;
5597 case TARGET_NR_gettimeofday:
5598 {
5599 struct timeval tv;
5600 ret = get_errno(gettimeofday(&tv, NULL));
5601 if (!is_error(ret)) {
5602 if (copy_to_user_timeval(arg1, &tv))
5603 goto efault;
5604 }
5605 }
5606 break;
5607 case TARGET_NR_settimeofday:
5608 {
5609 struct timeval tv;
5610 if (copy_from_user_timeval(&tv, arg1))
5611 goto efault;
5612 ret = get_errno(settimeofday(&tv, NULL));
5613 }
5614 break;
5615 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5616 case TARGET_NR_select:
5617 {
5618 struct target_sel_arg_struct *sel;
5619 abi_ulong inp, outp, exp, tvp;
5620 long nsel;
5621
5622 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5623 goto efault;
5624 nsel = tswapl(sel->n);
5625 inp = tswapl(sel->inp);
5626 outp = tswapl(sel->outp);
5627 exp = tswapl(sel->exp);
5628 tvp = tswapl(sel->tvp);
5629 unlock_user_struct(sel, arg1, 0);
5630 ret = do_select(nsel, inp, outp, exp, tvp);
5631 }
5632 break;
5633 #endif
5634 #ifdef TARGET_NR_pselect6
5635 case TARGET_NR_pselect6:
5636 {
5637 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
5638 fd_set rfds, wfds, efds;
5639 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
5640 struct timespec ts, *ts_ptr;
5641
5642 /*
5643 * The 6th arg is actually two args smashed together,
5644 * so we cannot use the C library.
5645 */
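/* (The kernel can only pass six syscall arguments, so pselect6 takes a
   pointer to a { const sigset_t *; size_t } pair as its sixth argument;
   that pair is unpacked from guest memory below.) */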
5646 sigset_t set;
5647 struct {
5648 sigset_t *set;
5649 size_t size;
5650 } sig, *sig_ptr;
5651
5652 abi_ulong arg_sigset, arg_sigsize, *arg7;
5653 target_sigset_t *target_sigset;
5654
5655 n = arg1;
5656 rfd_addr = arg2;
5657 wfd_addr = arg3;
5658 efd_addr = arg4;
5659 ts_addr = arg5;
5660
5661 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
5662 if (ret) {
5663 goto fail;
5664 }
5665 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
5666 if (ret) {
5667 goto fail;
5668 }
5669 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
5670 if (ret) {
5671 goto fail;
5672 }
5673
5674 /*
5675 * This takes a timespec, and not a timeval, so we cannot
5676 * use the do_select() helper ...
5677 */
5678 if (ts_addr) {
5679 if (target_to_host_timespec(&ts, ts_addr)) {
5680 goto efault;
5681 }
5682 ts_ptr = &ts;
5683 } else {
5684 ts_ptr = NULL;
5685 }
5686
5687 /* Extract the two packed args for the sigset */
5688 if (arg6) {
5689 sig_ptr = &sig;
5690 sig.size = _NSIG / 8;
5691
5692 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
5693 if (!arg7) {
5694 goto efault;
5695 }
5696 arg_sigset = tswapl(arg7[0]);
5697 arg_sigsize = tswapl(arg7[1]);
5698 unlock_user(arg7, arg6, 0);
5699
5700 if (arg_sigset) {
5701 sig.set = &set;
5702 target_sigset = lock_user(VERIFY_READ, arg_sigset,
5703 sizeof(*target_sigset), 1);
5704 if (!target_sigset) {
5705 goto efault;
5706 }
5707 target_to_host_sigset(&set, target_sigset);
5708 unlock_user(target_sigset, arg_sigset, 0);
5709 } else {
5710 sig.set = NULL;
5711 }
5712 } else {
5713 sig_ptr = NULL;
5714 }
5715
5716 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
5717 ts_ptr, sig_ptr));
5718
5719 if (!is_error(ret)) {
5720 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
5721 goto efault;
5722 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
5723 goto efault;
5724 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
5725 goto efault;
5726
5727 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
5728 goto efault;
5729 }
5730 }
5731 break;
5732 #endif
5733 case TARGET_NR_symlink:
5734 {
5735 void *p2;
5736 p = lock_user_string(arg1);
5737 p2 = lock_user_string(arg2);
5738 if (!p || !p2)
5739 ret = -TARGET_EFAULT;
5740 else
5741 ret = get_errno(symlink(p, p2));
5742 unlock_user(p2, arg2, 0);
5743 unlock_user(p, arg1, 0);
5744 }
5745 break;
5746 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5747 case TARGET_NR_symlinkat:
5748 {
5749 void *p2;
5750 p = lock_user_string(arg1);
5751 p2 = lock_user_string(arg3);
5752 if (!p || !p2)
5753 ret = -TARGET_EFAULT;
5754 else
5755 ret = get_errno(sys_symlinkat(p, arg2, p2));
5756 unlock_user(p2, arg3, 0);
5757 unlock_user(p, arg1, 0);
5758 }
5759 break;
5760 #endif
5761 #ifdef TARGET_NR_oldlstat
5762 case TARGET_NR_oldlstat:
5763 goto unimplemented;
5764 #endif
5765 case TARGET_NR_readlink:
5766 {
5767 void *p2, *temp;
5768 p = lock_user_string(arg1);
5769 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5770 if (!p || !p2)
5771 ret = -TARGET_EFAULT;
5772 else {
5773 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5774 char real[PATH_MAX];
5775 temp = realpath(exec_path,real);
5776                     ret = (temp == NULL) ? get_errno(-1) : strlen(real);
5777 snprintf((char *)p2, arg3, "%s", real);
5778 }
5779 else
5780 ret = get_errno(readlink(path(p), p2, arg3));
5781 }
5782 unlock_user(p2, arg2, ret);
5783 unlock_user(p, arg1, 0);
5784 }
5785 break;
5786 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5787 case TARGET_NR_readlinkat:
5788 {
5789 void *p2;
5790 p = lock_user_string(arg2);
5791 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5792 if (!p || !p2)
5793 ret = -TARGET_EFAULT;
5794 else
5795 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5796 unlock_user(p2, arg3, ret);
5797 unlock_user(p, arg2, 0);
5798 }
5799 break;
5800 #endif
5801 #ifdef TARGET_NR_uselib
5802 case TARGET_NR_uselib:
5803 goto unimplemented;
5804 #endif
5805 #ifdef TARGET_NR_swapon
5806 case TARGET_NR_swapon:
5807 if (!(p = lock_user_string(arg1)))
5808 goto efault;
5809 ret = get_errno(swapon(p, arg2));
5810 unlock_user(p, arg1, 0);
5811 break;
5812 #endif
5813 case TARGET_NR_reboot:
5814 goto unimplemented;
5815 #ifdef TARGET_NR_readdir
5816 case TARGET_NR_readdir:
5817 goto unimplemented;
5818 #endif
5819 #ifdef TARGET_NR_mmap
5820 case TARGET_NR_mmap:
5821 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5822 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5823 || defined(TARGET_S390X)
5824 {
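             /* On these targets the old-style mmap() passes a pointer to a
                block of six arguments in guest memory instead of passing them
                in registers. */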
5825 abi_ulong *v;
5826 abi_ulong v1, v2, v3, v4, v5, v6;
5827 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5828 goto efault;
5829 v1 = tswapl(v[0]);
5830 v2 = tswapl(v[1]);
5831 v3 = tswapl(v[2]);
5832 v4 = tswapl(v[3]);
5833 v5 = tswapl(v[4]);
5834 v6 = tswapl(v[5]);
5835 unlock_user(v, arg1, 0);
5836 ret = get_errno(target_mmap(v1, v2, v3,
5837 target_to_host_bitmask(v4, mmap_flags_tbl),
5838 v5, v6));
5839 }
5840 #else
5841 ret = get_errno(target_mmap(arg1, arg2, arg3,
5842 target_to_host_bitmask(arg4, mmap_flags_tbl),
5843 arg5,
5844 arg6));
5845 #endif
5846 break;
5847 #endif
5848 #ifdef TARGET_NR_mmap2
5849 case TARGET_NR_mmap2:
5850 #ifndef MMAP_SHIFT
5851 #define MMAP_SHIFT 12
5852 #endif
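         /* mmap2 passes its file offset in units of (1 << MMAP_SHIFT)-byte
            pages rather than bytes, so scale it up before calling
            target_mmap(). */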
5853 ret = get_errno(target_mmap(arg1, arg2, arg3,
5854 target_to_host_bitmask(arg4, mmap_flags_tbl),
5855 arg5,
5856 arg6 << MMAP_SHIFT));
5857 break;
5858 #endif
5859 case TARGET_NR_munmap:
5860 ret = get_errno(target_munmap(arg1, arg2));
5861 break;
5862 case TARGET_NR_mprotect:
5863 {
5864 TaskState *ts = ((CPUState *)cpu_env)->opaque;
5865 /* Special hack to detect libc making the stack executable. */
5866 if ((arg3 & PROT_GROWSDOWN)
5867 && arg1 >= ts->info->stack_limit
5868 && arg1 <= ts->info->start_stack) {
5869 arg3 &= ~PROT_GROWSDOWN;
5870 arg2 = arg2 + arg1 - ts->info->stack_limit;
5871 arg1 = ts->info->stack_limit;
5872 }
5873 }
5874 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5875 break;
5876 #ifdef TARGET_NR_mremap
5877 case TARGET_NR_mremap:
5878 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5879 break;
5880 #endif
5881 /* ??? msync/mlock/munlock are broken for softmmu. */
5882 #ifdef TARGET_NR_msync
5883 case TARGET_NR_msync:
5884 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5885 break;
5886 #endif
5887 #ifdef TARGET_NR_mlock
5888 case TARGET_NR_mlock:
5889 ret = get_errno(mlock(g2h(arg1), arg2));
5890 break;
5891 #endif
5892 #ifdef TARGET_NR_munlock
5893 case TARGET_NR_munlock:
5894 ret = get_errno(munlock(g2h(arg1), arg2));
5895 break;
5896 #endif
5897 #ifdef TARGET_NR_mlockall
5898 case TARGET_NR_mlockall:
5899 ret = get_errno(mlockall(arg1));
5900 break;
5901 #endif
5902 #ifdef TARGET_NR_munlockall
5903 case TARGET_NR_munlockall:
5904 ret = get_errno(munlockall());
5905 break;
5906 #endif
5907 case TARGET_NR_truncate:
5908 if (!(p = lock_user_string(arg1)))
5909 goto efault;
5910 ret = get_errno(truncate(p, arg2));
5911 unlock_user(p, arg1, 0);
5912 break;
5913 case TARGET_NR_ftruncate:
5914 ret = get_errno(ftruncate(arg1, arg2));
5915 break;
5916 case TARGET_NR_fchmod:
5917 ret = get_errno(fchmod(arg1, arg2));
5918 break;
5919 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5920 case TARGET_NR_fchmodat:
5921 if (!(p = lock_user_string(arg2)))
5922 goto efault;
5923 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5924 unlock_user(p, arg2, 0);
5925 break;
5926 #endif
5927 case TARGET_NR_getpriority:
5928 /* libc does special remapping of the return value of
5929 * sys_getpriority() so it's just easiest to call
5930 * sys_getpriority() directly rather than through libc. */
5931 ret = get_errno(sys_getpriority(arg1, arg2));
5932 break;
5933 case TARGET_NR_setpriority:
5934 ret = get_errno(setpriority(arg1, arg2, arg3));
5935 break;
5936 #ifdef TARGET_NR_profil
5937 case TARGET_NR_profil:
5938 goto unimplemented;
5939 #endif
5940 case TARGET_NR_statfs:
5941 if (!(p = lock_user_string(arg1)))
5942 goto efault;
5943 ret = get_errno(statfs(path(p), &stfs));
5944 unlock_user(p, arg1, 0);
5945 convert_statfs:
5946 if (!is_error(ret)) {
5947 struct target_statfs *target_stfs;
5948
5949 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5950 goto efault;
5951 __put_user(stfs.f_type, &target_stfs->f_type);
5952 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5953 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5954 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5955 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5956 __put_user(stfs.f_files, &target_stfs->f_files);
5957 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5958 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5959 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5960 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5961 unlock_user_struct(target_stfs, arg2, 1);
5962 }
5963 break;
5964 case TARGET_NR_fstatfs:
5965 ret = get_errno(fstatfs(arg1, &stfs));
5966 goto convert_statfs;
5967 #ifdef TARGET_NR_statfs64
5968 case TARGET_NR_statfs64:
5969 if (!(p = lock_user_string(arg1)))
5970 goto efault;
5971 ret = get_errno(statfs(path(p), &stfs));
5972 unlock_user(p, arg1, 0);
5973 convert_statfs64:
5974 if (!is_error(ret)) {
5975 struct target_statfs64 *target_stfs;
5976
5977 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5978 goto efault;
5979 __put_user(stfs.f_type, &target_stfs->f_type);
5980 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5981 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5982 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5983 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5984 __put_user(stfs.f_files, &target_stfs->f_files);
5985 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5986 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5987 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5988 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5989 unlock_user_struct(target_stfs, arg3, 1);
5990 }
5991 break;
5992 case TARGET_NR_fstatfs64:
5993 ret = get_errno(fstatfs(arg1, &stfs));
5994 goto convert_statfs64;
5995 #endif
5996 #ifdef TARGET_NR_ioperm
5997 case TARGET_NR_ioperm:
5998 goto unimplemented;
5999 #endif
6000 #ifdef TARGET_NR_socketcall
6001 case TARGET_NR_socketcall:
6002 ret = do_socketcall(arg1, arg2);
6003 break;
6004 #endif
6005 #ifdef TARGET_NR_accept
6006 case TARGET_NR_accept:
6007 ret = do_accept(arg1, arg2, arg3);
6008 break;
6009 #endif
6010 #ifdef TARGET_NR_bind
6011 case TARGET_NR_bind:
6012 ret = do_bind(arg1, arg2, arg3);
6013 break;
6014 #endif
6015 #ifdef TARGET_NR_connect
6016 case TARGET_NR_connect:
6017 ret = do_connect(arg1, arg2, arg3);
6018 break;
6019 #endif
6020 #ifdef TARGET_NR_getpeername
6021 case TARGET_NR_getpeername:
6022 ret = do_getpeername(arg1, arg2, arg3);
6023 break;
6024 #endif
6025 #ifdef TARGET_NR_getsockname
6026 case TARGET_NR_getsockname:
6027 ret = do_getsockname(arg1, arg2, arg3);
6028 break;
6029 #endif
6030 #ifdef TARGET_NR_getsockopt
6031 case TARGET_NR_getsockopt:
6032 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6033 break;
6034 #endif
6035 #ifdef TARGET_NR_listen
6036 case TARGET_NR_listen:
6037 ret = get_errno(listen(arg1, arg2));
6038 break;
6039 #endif
6040 #ifdef TARGET_NR_recv
6041 case TARGET_NR_recv:
6042 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6043 break;
6044 #endif
6045 #ifdef TARGET_NR_recvfrom
6046 case TARGET_NR_recvfrom:
6047 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6048 break;
6049 #endif
6050 #ifdef TARGET_NR_recvmsg
6051 case TARGET_NR_recvmsg:
6052 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6053 break;
6054 #endif
6055 #ifdef TARGET_NR_send
6056 case TARGET_NR_send:
6057 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6058 break;
6059 #endif
6060 #ifdef TARGET_NR_sendmsg
6061 case TARGET_NR_sendmsg:
6062 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6063 break;
6064 #endif
6065 #ifdef TARGET_NR_sendto
6066 case TARGET_NR_sendto:
6067 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6068 break;
6069 #endif
6070 #ifdef TARGET_NR_shutdown
6071 case TARGET_NR_shutdown:
6072 ret = get_errno(shutdown(arg1, arg2));
6073 break;
6074 #endif
6075 #ifdef TARGET_NR_socket
6076 case TARGET_NR_socket:
6077 ret = do_socket(arg1, arg2, arg3);
6078 break;
6079 #endif
6080 #ifdef TARGET_NR_socketpair
6081 case TARGET_NR_socketpair:
6082 ret = do_socketpair(arg1, arg2, arg3, arg4);
6083 break;
6084 #endif
6085 #ifdef TARGET_NR_setsockopt
6086 case TARGET_NR_setsockopt:
6087 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6088 break;
6089 #endif
6090
6091 case TARGET_NR_syslog:
6092 if (!(p = lock_user_string(arg2)))
6093 goto efault;
6094 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6095 unlock_user(p, arg2, 0);
6096 break;
6097
6098 case TARGET_NR_setitimer:
6099 {
6100 struct itimerval value, ovalue, *pvalue;
6101
6102 if (arg2) {
6103 pvalue = &value;
6104 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6105 || copy_from_user_timeval(&pvalue->it_value,
6106 arg2 + sizeof(struct target_timeval)))
6107 goto efault;
6108 } else {
6109 pvalue = NULL;
6110 }
6111 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6112 if (!is_error(ret) && arg3) {
6113 if (copy_to_user_timeval(arg3,
6114 &ovalue.it_interval)
6115 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6116 &ovalue.it_value))
6117 goto efault;
6118 }
6119 }
6120 break;
6121 case TARGET_NR_getitimer:
6122 {
6123 struct itimerval value;
6124
6125 ret = get_errno(getitimer(arg1, &value));
6126 if (!is_error(ret) && arg2) {
6127 if (copy_to_user_timeval(arg2,
6128 &value.it_interval)
6129 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6130 &value.it_value))
6131 goto efault;
6132 }
6133 }
6134 break;
6135 case TARGET_NR_stat:
6136 if (!(p = lock_user_string(arg1)))
6137 goto efault;
6138 ret = get_errno(stat(path(p), &st));
6139 unlock_user(p, arg1, 0);
6140 goto do_stat;
6141 case TARGET_NR_lstat:
6142 if (!(p = lock_user_string(arg1)))
6143 goto efault;
6144 ret = get_errno(lstat(path(p), &st));
6145 unlock_user(p, arg1, 0);
6146 goto do_stat;
6147 case TARGET_NR_fstat:
6148 {
6149 ret = get_errno(fstat(arg1, &st));
6150 do_stat:
6151 if (!is_error(ret)) {
6152 struct target_stat *target_st;
6153
6154 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6155 goto efault;
6156 memset(target_st, 0, sizeof(*target_st));
6157 __put_user(st.st_dev, &target_st->st_dev);
6158 __put_user(st.st_ino, &target_st->st_ino);
6159 __put_user(st.st_mode, &target_st->st_mode);
6160 __put_user(st.st_uid, &target_st->st_uid);
6161 __put_user(st.st_gid, &target_st->st_gid);
6162 __put_user(st.st_nlink, &target_st->st_nlink);
6163 __put_user(st.st_rdev, &target_st->st_rdev);
6164 __put_user(st.st_size, &target_st->st_size);
6165 __put_user(st.st_blksize, &target_st->st_blksize);
6166 __put_user(st.st_blocks, &target_st->st_blocks);
6167 __put_user(st.st_atime, &target_st->target_st_atime);
6168 __put_user(st.st_mtime, &target_st->target_st_mtime);
6169 __put_user(st.st_ctime, &target_st->target_st_ctime);
6170 unlock_user_struct(target_st, arg2, 1);
6171 }
6172 }
6173 break;
6174 #ifdef TARGET_NR_olduname
6175 case TARGET_NR_olduname:
6176 goto unimplemented;
6177 #endif
6178 #ifdef TARGET_NR_iopl
6179 case TARGET_NR_iopl:
6180 goto unimplemented;
6181 #endif
6182 case TARGET_NR_vhangup:
6183 ret = get_errno(vhangup());
6184 break;
6185 #ifdef TARGET_NR_idle
6186 case TARGET_NR_idle:
6187 goto unimplemented;
6188 #endif
6189 #ifdef TARGET_NR_syscall
6190 case TARGET_NR_syscall:
6191 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6192 arg6, arg7, arg8, 0);
6193 break;
6194 #endif
6195 case TARGET_NR_wait4:
6196 {
6197 int status;
6198 abi_long status_ptr = arg2;
6199 struct rusage rusage, *rusage_ptr;
6200 abi_ulong target_rusage = arg4;
6201 if (target_rusage)
6202 rusage_ptr = &rusage;
6203 else
6204 rusage_ptr = NULL;
6205 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6206 if (!is_error(ret)) {
6207 if (status_ptr) {
6208 status = host_to_target_waitstatus(status);
6209 if (put_user_s32(status, status_ptr))
6210 goto efault;
6211 }
6212 if (target_rusage)
6213 host_to_target_rusage(target_rusage, &rusage);
6214 }
6215 }
6216 break;
6217 #ifdef TARGET_NR_swapoff
6218 case TARGET_NR_swapoff:
6219 if (!(p = lock_user_string(arg1)))
6220 goto efault;
6221 ret = get_errno(swapoff(p));
6222 unlock_user(p, arg1, 0);
6223 break;
6224 #endif
6225 case TARGET_NR_sysinfo:
6226 {
6227 struct target_sysinfo *target_value;
6228 struct sysinfo value;
6229 ret = get_errno(sysinfo(&value));
6230 if (!is_error(ret) && arg1)
6231 {
6232 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6233 goto efault;
6234 __put_user(value.uptime, &target_value->uptime);
6235 __put_user(value.loads[0], &target_value->loads[0]);
6236 __put_user(value.loads[1], &target_value->loads[1]);
6237 __put_user(value.loads[2], &target_value->loads[2]);
6238 __put_user(value.totalram, &target_value->totalram);
6239 __put_user(value.freeram, &target_value->freeram);
6240 __put_user(value.sharedram, &target_value->sharedram);
6241 __put_user(value.bufferram, &target_value->bufferram);
6242 __put_user(value.totalswap, &target_value->totalswap);
6243 __put_user(value.freeswap, &target_value->freeswap);
6244 __put_user(value.procs, &target_value->procs);
6245 __put_user(value.totalhigh, &target_value->totalhigh);
6246 __put_user(value.freehigh, &target_value->freehigh);
6247 __put_user(value.mem_unit, &target_value->mem_unit);
6248 unlock_user_struct(target_value, arg1, 1);
6249 }
6250 }
6251 break;
6252 #ifdef TARGET_NR_ipc
6253 case TARGET_NR_ipc:
6254 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6255 break;
6256 #endif
6257 #ifdef TARGET_NR_semget
6258 case TARGET_NR_semget:
6259 ret = get_errno(semget(arg1, arg2, arg3));
6260 break;
6261 #endif
6262 #ifdef TARGET_NR_semop
6263 case TARGET_NR_semop:
6264 ret = get_errno(do_semop(arg1, arg2, arg3));
6265 break;
6266 #endif
6267 #ifdef TARGET_NR_semctl
6268 case TARGET_NR_semctl:
6269 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6270 break;
6271 #endif
6272 #ifdef TARGET_NR_msgctl
6273 case TARGET_NR_msgctl:
6274 ret = do_msgctl(arg1, arg2, arg3);
6275 break;
6276 #endif
6277 #ifdef TARGET_NR_msgget
6278 case TARGET_NR_msgget:
6279 ret = get_errno(msgget(arg1, arg2));
6280 break;
6281 #endif
6282 #ifdef TARGET_NR_msgrcv
6283 case TARGET_NR_msgrcv:
6284 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6285 break;
6286 #endif
6287 #ifdef TARGET_NR_msgsnd
6288 case TARGET_NR_msgsnd:
6289 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6290 break;
6291 #endif
6292 #ifdef TARGET_NR_shmget
6293 case TARGET_NR_shmget:
6294 ret = get_errno(shmget(arg1, arg2, arg3));
6295 break;
6296 #endif
6297 #ifdef TARGET_NR_shmctl
6298 case TARGET_NR_shmctl:
6299 ret = do_shmctl(arg1, arg2, arg3);
6300 break;
6301 #endif
6302 #ifdef TARGET_NR_shmat
6303 case TARGET_NR_shmat:
6304 ret = do_shmat(arg1, arg2, arg3);
6305 break;
6306 #endif
6307 #ifdef TARGET_NR_shmdt
6308 case TARGET_NR_shmdt:
6309 ret = do_shmdt(arg1);
6310 break;
6311 #endif
6312 case TARGET_NR_fsync:
6313 ret = get_errno(fsync(arg1));
6314 break;
6315 case TARGET_NR_clone:
6316 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6317 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6318 #elif defined(TARGET_CRIS)
6319 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6320 #elif defined(TARGET_S390X)
6321 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6322 #else
6323 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6324 #endif
6325 break;
6326 #ifdef __NR_exit_group
6327 /* new thread calls */
6328 case TARGET_NR_exit_group:
6329 #ifdef TARGET_GPROF
6330 _mcleanup();
6331 #endif
6332 gdb_exit(cpu_env, arg1);
6333 ret = get_errno(exit_group(arg1));
6334 break;
6335 #endif
6336 case TARGET_NR_setdomainname:
6337 if (!(p = lock_user_string(arg1)))
6338 goto efault;
6339 ret = get_errno(setdomainname(p, arg2));
6340 unlock_user(p, arg1, 0);
6341 break;
6342 case TARGET_NR_uname:
6343 /* no need to transcode because we use the linux syscall */
6344 {
6345 struct new_utsname * buf;
6346
6347 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6348 goto efault;
6349 ret = get_errno(sys_uname(buf));
6350 if (!is_error(ret)) {
6351             /* Overwrite the native machine name with whatever is being
6352                emulated. */
6353 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6354 /* Allow the user to override the reported release. */
6355 if (qemu_uname_release && *qemu_uname_release)
6356 strcpy (buf->release, qemu_uname_release);
6357 }
6358 unlock_user_struct(buf, arg1, 1);
6359 }
6360 break;
6361 #ifdef TARGET_I386
6362 case TARGET_NR_modify_ldt:
6363 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6364 break;
6365 #if !defined(TARGET_X86_64)
6366 case TARGET_NR_vm86old:
6367 goto unimplemented;
6368 case TARGET_NR_vm86:
6369 ret = do_vm86(cpu_env, arg1, arg2);
6370 break;
6371 #endif
6372 #endif
6373 case TARGET_NR_adjtimex:
6374 goto unimplemented;
6375 #ifdef TARGET_NR_create_module
6376 case TARGET_NR_create_module:
6377 #endif
6378 case TARGET_NR_init_module:
6379 case TARGET_NR_delete_module:
6380 #ifdef TARGET_NR_get_kernel_syms
6381 case TARGET_NR_get_kernel_syms:
6382 #endif
6383 goto unimplemented;
6384 case TARGET_NR_quotactl:
6385 goto unimplemented;
6386 case TARGET_NR_getpgid:
6387 ret = get_errno(getpgid(arg1));
6388 break;
6389 case TARGET_NR_fchdir:
6390 ret = get_errno(fchdir(arg1));
6391 break;
6392 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6393 case TARGET_NR_bdflush:
6394 goto unimplemented;
6395 #endif
6396 #ifdef TARGET_NR_sysfs
6397 case TARGET_NR_sysfs:
6398 goto unimplemented;
6399 #endif
6400 case TARGET_NR_personality:
6401 ret = get_errno(personality(arg1));
6402 break;
6403 #ifdef TARGET_NR_afs_syscall
6404 case TARGET_NR_afs_syscall:
6405 goto unimplemented;
6406 #endif
6407 #ifdef TARGET_NR__llseek /* Not on alpha */
6408 case TARGET_NR__llseek:
6409 {
6410 int64_t res;
6411 #if !defined(__NR_llseek)
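                 /* The host has no llseek: reassemble the 64-bit offset from
                    the two 32-bit halves and fall back to plain lseek(). */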
6412 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6413 if (res == -1) {
6414 ret = get_errno(res);
6415 } else {
6416 ret = 0;
6417 }
6418 #else
6419 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6420 #endif
6421 if ((ret == 0) && put_user_s64(res, arg4)) {
6422 goto efault;
6423 }
6424 }
6425 break;
6426 #endif
6427 case TARGET_NR_getdents:
6428 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6429 {
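         /* Host and target dirent layouts differ here (64-bit vs 32-bit d_ino
            and d_off), so each record has to be repacked and its reclen
            adjusted before it is copied back to the guest. */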
6430 struct target_dirent *target_dirp;
6431 struct linux_dirent *dirp;
6432 abi_long count = arg3;
6433
6434 dirp = malloc(count);
6435 if (!dirp) {
6436 ret = -TARGET_ENOMEM;
6437 goto fail;
6438 }
6439
6440 ret = get_errno(sys_getdents(arg1, dirp, count));
6441 if (!is_error(ret)) {
6442 struct linux_dirent *de;
6443 struct target_dirent *tde;
6444 int len = ret;
6445 int reclen, treclen;
6446 int count1, tnamelen;
6447
6448 count1 = 0;
6449 de = dirp;
6450 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6451 goto efault;
6452 tde = target_dirp;
6453 while (len > 0) {
6454 reclen = de->d_reclen;
6455 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6456 tde->d_reclen = tswap16(treclen);
6457 tde->d_ino = tswapl(de->d_ino);
6458 tde->d_off = tswapl(de->d_off);
6459 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6460 if (tnamelen > 256)
6461 tnamelen = 256;
6462 /* XXX: may not be correct */
6463 pstrcpy(tde->d_name, tnamelen, de->d_name);
6464 de = (struct linux_dirent *)((char *)de + reclen);
6465 len -= reclen;
6466 tde = (struct target_dirent *)((char *)tde + treclen);
6467 count1 += treclen;
6468 }
6469 ret = count1;
6470 unlock_user(target_dirp, arg2, ret);
6471 }
6472 free(dirp);
6473 }
6474 #else
6475 {
6476 struct linux_dirent *dirp;
6477 abi_long count = arg3;
6478
6479 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6480 goto efault;
6481 ret = get_errno(sys_getdents(arg1, dirp, count));
6482 if (!is_error(ret)) {
6483 struct linux_dirent *de;
6484 int len = ret;
6485 int reclen;
6486 de = dirp;
6487 while (len > 0) {
6488 reclen = de->d_reclen;
6489 if (reclen > len)
6490 break;
6491 de->d_reclen = tswap16(reclen);
6492 tswapls(&de->d_ino);
6493 tswapls(&de->d_off);
6494 de = (struct linux_dirent *)((char *)de + reclen);
6495 len -= reclen;
6496 }
6497 }
6498 unlock_user(dirp, arg2, ret);
6499 }
6500 #endif
6501 break;
6502 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6503 case TARGET_NR_getdents64:
6504 {
6505 struct linux_dirent64 *dirp;
6506 abi_long count = arg3;
6507 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6508 goto efault;
6509 ret = get_errno(sys_getdents64(arg1, dirp, count));
6510 if (!is_error(ret)) {
6511 struct linux_dirent64 *de;
6512 int len = ret;
6513 int reclen;
6514 de = dirp;
6515 while (len > 0) {
6516 reclen = de->d_reclen;
6517 if (reclen > len)
6518 break;
6519 de->d_reclen = tswap16(reclen);
6520 tswap64s((uint64_t *)&de->d_ino);
6521 tswap64s((uint64_t *)&de->d_off);
6522 de = (struct linux_dirent64 *)((char *)de + reclen);
6523 len -= reclen;
6524 }
6525 }
6526 unlock_user(dirp, arg2, ret);
6527 }
6528 break;
6529 #endif /* TARGET_NR_getdents64 */
6530 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6531 #ifdef TARGET_S390X
6532 case TARGET_NR_select:
6533 #else
6534 case TARGET_NR__newselect:
6535 #endif
6536 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6537 break;
6538 #endif
6539 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6540 # ifdef TARGET_NR_poll
6541 case TARGET_NR_poll:
6542 # endif
6543 # ifdef TARGET_NR_ppoll
6544 case TARGET_NR_ppoll:
6545 # endif
6546 {
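                 /* Convert the guest pollfd array into host struct pollfd;
                    for ppoll the optional timespec timeout and signal mask
                    are converted as well. */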
6547 struct target_pollfd *target_pfd;
6548 unsigned int nfds = arg2;
6549 int timeout = arg3;
6550 struct pollfd *pfd;
6551 unsigned int i;
6552
6553 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6554 if (!target_pfd)
6555 goto efault;
6556
6557 pfd = alloca(sizeof(struct pollfd) * nfds);
6558 for(i = 0; i < nfds; i++) {
6559 pfd[i].fd = tswap32(target_pfd[i].fd);
6560 pfd[i].events = tswap16(target_pfd[i].events);
6561 }
6562
6563 # ifdef TARGET_NR_ppoll
6564 if (num == TARGET_NR_ppoll) {
6565 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6566 target_sigset_t *target_set;
6567 sigset_t _set, *set = &_set;
6568
6569 if (arg3) {
6570 if (target_to_host_timespec(timeout_ts, arg3)) {
6571 unlock_user(target_pfd, arg1, 0);
6572 goto efault;
6573 }
6574 } else {
6575 timeout_ts = NULL;
6576 }
6577
6578 if (arg4) {
6579 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6580 if (!target_set) {
6581 unlock_user(target_pfd, arg1, 0);
6582 goto efault;
6583 }
6584 target_to_host_sigset(set, target_set);
6585 } else {
6586 set = NULL;
6587 }
6588
6589 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6590
6591 if (!is_error(ret) && arg3) {
6592 host_to_target_timespec(arg3, timeout_ts);
6593 }
6594 if (arg4) {
6595 unlock_user(target_set, arg4, 0);
6596 }
6597 } else
6598 # endif
6599 ret = get_errno(poll(pfd, nfds, timeout));
6600
6601 if (!is_error(ret)) {
6602 for(i = 0; i < nfds; i++) {
6603 target_pfd[i].revents = tswap16(pfd[i].revents);
6604 }
6605 }
6606 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6607 }
6608 break;
6609 #endif
6610 case TARGET_NR_flock:
6611 /* NOTE: the flock constant seems to be the same for every
6612 Linux platform */
6613 ret = get_errno(flock(arg1, arg2));
6614 break;
6615 case TARGET_NR_readv:
6616 {
6617 int count = arg3;
6618 struct iovec *vec;
6619
6620 vec = alloca(count * sizeof(struct iovec));
6621 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6622 goto efault;
6623 ret = get_errno(readv(arg1, vec, count));
6624 unlock_iovec(vec, arg2, count, 1);
6625 }
6626 break;
6627 case TARGET_NR_writev:
6628 {
6629 int count = arg3;
6630 struct iovec *vec;
6631
6632 vec = alloca(count * sizeof(struct iovec));
6633 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6634 goto efault;
6635 ret = get_errno(writev(arg1, vec, count));
6636 unlock_iovec(vec, arg2, count, 0);
6637 }
6638 break;
6639 case TARGET_NR_getsid:
6640 ret = get_errno(getsid(arg1));
6641 break;
6642 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6643 case TARGET_NR_fdatasync:
6644 ret = get_errno(fdatasync(arg1));
6645 break;
6646 #endif
6647 case TARGET_NR__sysctl:
6648 /* We don't implement this, but ENOTDIR is always a safe
6649 return value. */
6650 ret = -TARGET_ENOTDIR;
6651 break;
6652 case TARGET_NR_sched_getaffinity:
6653 {
6654 unsigned int mask_size;
6655 unsigned long *mask;
6656
6657 /*
6658              * sched_getaffinity needs multiples of ulong, so we need to take
6659              * care of mismatches between target ulong and host ulong sizes.
6660 */
6661 if (arg2 & (sizeof(abi_ulong) - 1)) {
6662 ret = -TARGET_EINVAL;
6663 break;
6664 }
6665 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6666
6667 mask = alloca(mask_size);
6668 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6669
6670 if (!is_error(ret)) {
6671 if (copy_to_user(arg3, mask, ret)) {
6672 goto efault;
6673 }
6674 }
6675 }
6676 break;
6677 case TARGET_NR_sched_setaffinity:
6678 {
6679 unsigned int mask_size;
6680 unsigned long *mask;
6681
6682 /*
6683              * sched_setaffinity needs multiples of ulong, so we need to take
6684              * care of mismatches between target ulong and host ulong sizes.
6685 */
6686 if (arg2 & (sizeof(abi_ulong) - 1)) {
6687 ret = -TARGET_EINVAL;
6688 break;
6689 }
6690 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6691
6692                     mask = alloca(mask_size);
6693                     if (!(p = lock_user(VERIFY_READ, arg3, arg2, 1))) {
6694                         goto efault;
6695                     }
6696                     memcpy(mask, p, arg2);
6697                     unlock_user(p, arg3, 0);
6698
6699 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6700 }
6701 break;
6702 case TARGET_NR_sched_setparam:
6703 {
6704 struct sched_param *target_schp;
6705 struct sched_param schp;
6706
6707 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6708 goto efault;
6709 schp.sched_priority = tswap32(target_schp->sched_priority);
6710 unlock_user_struct(target_schp, arg2, 0);
6711 ret = get_errno(sched_setparam(arg1, &schp));
6712 }
6713 break;
6714 case TARGET_NR_sched_getparam:
6715 {
6716 struct sched_param *target_schp;
6717 struct sched_param schp;
6718 ret = get_errno(sched_getparam(arg1, &schp));
6719 if (!is_error(ret)) {
6720 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6721 goto efault;
6722 target_schp->sched_priority = tswap32(schp.sched_priority);
6723 unlock_user_struct(target_schp, arg2, 1);
6724 }
6725 }
6726 break;
6727 case TARGET_NR_sched_setscheduler:
6728 {
6729 struct sched_param *target_schp;
6730 struct sched_param schp;
6731 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6732 goto efault;
6733 schp.sched_priority = tswap32(target_schp->sched_priority);
6734 unlock_user_struct(target_schp, arg3, 0);
6735 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6736 }
6737 break;
6738 case TARGET_NR_sched_getscheduler:
6739 ret = get_errno(sched_getscheduler(arg1));
6740 break;
6741 case TARGET_NR_sched_yield:
6742 ret = get_errno(sched_yield());
6743 break;
6744 case TARGET_NR_sched_get_priority_max:
6745 ret = get_errno(sched_get_priority_max(arg1));
6746 break;
6747 case TARGET_NR_sched_get_priority_min:
6748 ret = get_errno(sched_get_priority_min(arg1));
6749 break;
6750 case TARGET_NR_sched_rr_get_interval:
6751 {
6752 struct timespec ts;
6753 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6754 if (!is_error(ret)) {
6755 host_to_target_timespec(arg2, &ts);
6756 }
6757 }
6758 break;
6759 case TARGET_NR_nanosleep:
6760 {
6761 struct timespec req, rem;
6762 target_to_host_timespec(&req, arg1);
6763 ret = get_errno(nanosleep(&req, &rem));
6764 if (is_error(ret) && arg2) {
6765 host_to_target_timespec(arg2, &rem);
6766 }
6767 }
6768 break;
6769 #ifdef TARGET_NR_query_module
6770 case TARGET_NR_query_module:
6771 goto unimplemented;
6772 #endif
6773 #ifdef TARGET_NR_nfsservctl
6774 case TARGET_NR_nfsservctl:
6775 goto unimplemented;
6776 #endif
6777 case TARGET_NR_prctl:
6778 switch (arg1)
6779 {
6780 case PR_GET_PDEATHSIG:
6781 {
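                 /* PR_GET_PDEATHSIG reports the signal number through the
                    pointer argument, so read it into a host int and copy it
                    back out to guest memory. */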
6782 int deathsig;
6783 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6784 if (!is_error(ret) && arg2
6785 && put_user_ual(deathsig, arg2))
6786 goto efault;
6787 }
6788 break;
6789 default:
6790 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6791 break;
6792 }
6793 break;
6794 #ifdef TARGET_NR_arch_prctl
6795 case TARGET_NR_arch_prctl:
6796 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6797 ret = do_arch_prctl(cpu_env, arg1, arg2);
6798 break;
6799 #else
6800 goto unimplemented;
6801 #endif
6802 #endif
6803 #ifdef TARGET_NR_pread
6804 case TARGET_NR_pread:
6805 #ifdef TARGET_ARM
6806 if (((CPUARMState *)cpu_env)->eabi)
6807 arg4 = arg5;
6808 #endif
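         /* ARM EABI aligns the 64-bit file offset to an even register pair,
            which inserts a padding word before it; the adjustment above skips
            that padding. */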
6809 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6810 goto efault;
6811 ret = get_errno(pread(arg1, p, arg3, arg4));
6812 unlock_user(p, arg2, ret);
6813 break;
6814 case TARGET_NR_pwrite:
6815 #ifdef TARGET_ARM
6816 if (((CPUARMState *)cpu_env)->eabi)
6817 arg4 = arg5;
6818 #endif
6819 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6820 goto efault;
6821 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6822 unlock_user(p, arg2, 0);
6823 break;
6824 #endif
6825 #ifdef TARGET_NR_pread64
6826 case TARGET_NR_pread64:
6827 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6828 goto efault;
6829 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6830 unlock_user(p, arg2, ret);
6831 break;
6832 case TARGET_NR_pwrite64:
6833 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6834 goto efault;
6835 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6836 unlock_user(p, arg2, 0);
6837 break;
6838 #endif
6839 case TARGET_NR_getcwd:
6840 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6841 goto efault;
6842 ret = get_errno(sys_getcwd1(p, arg2));
6843 unlock_user(p, arg1, ret);
6844 break;
6845 case TARGET_NR_capget:
6846 goto unimplemented;
6847 case TARGET_NR_capset:
6848 goto unimplemented;
6849 case TARGET_NR_sigaltstack:
6850 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6851 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6852 defined(TARGET_M68K) || defined(TARGET_S390X)
6853 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6854 break;
6855 #else
6856 goto unimplemented;
6857 #endif
6858 case TARGET_NR_sendfile:
6859 goto unimplemented;
6860 #ifdef TARGET_NR_getpmsg
6861 case TARGET_NR_getpmsg:
6862 goto unimplemented;
6863 #endif
6864 #ifdef TARGET_NR_putpmsg
6865 case TARGET_NR_putpmsg:
6866 goto unimplemented;
6867 #endif
6868 #ifdef TARGET_NR_vfork
6869 case TARGET_NR_vfork:
6870 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6871 0, 0, 0, 0));
6872 break;
6873 #endif
6874 #ifdef TARGET_NR_ugetrlimit
6875 case TARGET_NR_ugetrlimit:
6876 {
6877 struct rlimit rlim;
6878 ret = get_errno(getrlimit(arg1, &rlim));
6879 if (!is_error(ret)) {
6880 struct target_rlimit *target_rlim;
6881 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6882 goto efault;
6883 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6884 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6885 unlock_user_struct(target_rlim, arg2, 1);
6886 }
6887 break;
6888 }
6889 #endif
6890 #ifdef TARGET_NR_truncate64
6891 case TARGET_NR_truncate64:
6892 if (!(p = lock_user_string(arg1)))
6893 goto efault;
6894 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6895 unlock_user(p, arg1, 0);
6896 break;
6897 #endif
6898 #ifdef TARGET_NR_ftruncate64
6899 case TARGET_NR_ftruncate64:
6900 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6901 break;
6902 #endif
6903 #ifdef TARGET_NR_stat64
6904 case TARGET_NR_stat64:
6905 if (!(p = lock_user_string(arg1)))
6906 goto efault;
6907 ret = get_errno(stat(path(p), &st));
6908 unlock_user(p, arg1, 0);
6909 if (!is_error(ret))
6910 ret = host_to_target_stat64(cpu_env, arg2, &st);
6911 break;
6912 #endif
6913 #ifdef TARGET_NR_lstat64
6914 case TARGET_NR_lstat64:
6915 if (!(p = lock_user_string(arg1)))
6916 goto efault;
6917 ret = get_errno(lstat(path(p), &st));
6918 unlock_user(p, arg1, 0);
6919 if (!is_error(ret))
6920 ret = host_to_target_stat64(cpu_env, arg2, &st);
6921 break;
6922 #endif
6923 #ifdef TARGET_NR_fstat64
6924 case TARGET_NR_fstat64:
6925 ret = get_errno(fstat(arg1, &st));
6926 if (!is_error(ret))
6927 ret = host_to_target_stat64(cpu_env, arg2, &st);
6928 break;
6929 #endif
6930 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6931 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6932 #ifdef TARGET_NR_fstatat64
6933 case TARGET_NR_fstatat64:
6934 #endif
6935 #ifdef TARGET_NR_newfstatat
6936 case TARGET_NR_newfstatat:
6937 #endif
6938 if (!(p = lock_user_string(arg2)))
6939 goto efault;
6940 #ifdef __NR_fstatat64
6941 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6942 #else
6943 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6944 #endif
6945 if (!is_error(ret))
6946 ret = host_to_target_stat64(cpu_env, arg3, &st);
6947 break;
6948 #endif
6949 case TARGET_NR_lchown:
6950 if (!(p = lock_user_string(arg1)))
6951 goto efault;
6952 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6953 unlock_user(p, arg1, 0);
6954 break;
6955 #ifdef TARGET_NR_getuid
6956 case TARGET_NR_getuid:
6957 ret = get_errno(high2lowuid(getuid()));
6958 break;
6959 #endif
6960 #ifdef TARGET_NR_getgid
6961 case TARGET_NR_getgid:
6962 ret = get_errno(high2lowgid(getgid()));
6963 break;
6964 #endif
6965 #ifdef TARGET_NR_geteuid
6966 case TARGET_NR_geteuid:
6967 ret = get_errno(high2lowuid(geteuid()));
6968 break;
6969 #endif
6970 #ifdef TARGET_NR_getegid
6971 case TARGET_NR_getegid:
6972 ret = get_errno(high2lowgid(getegid()));
6973 break;
6974 #endif
6975 case TARGET_NR_setreuid:
6976 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6977 break;
6978 case TARGET_NR_setregid:
6979 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6980 break;
6981 case TARGET_NR_getgroups:
6982 {
6983 int gidsetsize = arg1;
6984 target_id *target_grouplist;
6985 gid_t *grouplist;
6986 int i;
6987
6988 grouplist = alloca(gidsetsize * sizeof(gid_t));
6989 ret = get_errno(getgroups(gidsetsize, grouplist));
6990 if (gidsetsize == 0)
6991 break;
6992 if (!is_error(ret)) {
6993 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6994 if (!target_grouplist)
6995 goto efault;
6996 for(i = 0;i < ret; i++)
6997 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
6998 unlock_user(target_grouplist, arg2, gidsetsize * 2);
6999 }
7000 }
7001 break;
7002 case TARGET_NR_setgroups:
7003 {
7004 int gidsetsize = arg1;
7005 target_id *target_grouplist;
7006 gid_t *grouplist;
7007 int i;
7008
7009 grouplist = alloca(gidsetsize * sizeof(gid_t));
7010 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7011 if (!target_grouplist) {
7012 ret = -TARGET_EFAULT;
7013 goto fail;
7014 }
7015 for(i = 0;i < gidsetsize; i++)
7016 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7017 unlock_user(target_grouplist, arg2, 0);
7018 ret = get_errno(setgroups(gidsetsize, grouplist));
7019 }
7020 break;
7021 case TARGET_NR_fchown:
7022 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7023 break;
7024 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7025 case TARGET_NR_fchownat:
7026 if (!(p = lock_user_string(arg2)))
7027 goto efault;
7028 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7029 unlock_user(p, arg2, 0);
7030 break;
7031 #endif
7032 #ifdef TARGET_NR_setresuid
7033 case TARGET_NR_setresuid:
7034 ret = get_errno(setresuid(low2highuid(arg1),
7035 low2highuid(arg2),
7036 low2highuid(arg3)));
7037 break;
7038 #endif
7039 #ifdef TARGET_NR_getresuid
7040 case TARGET_NR_getresuid:
7041 {
7042 uid_t ruid, euid, suid;
7043 ret = get_errno(getresuid(&ruid, &euid, &suid));
7044 if (!is_error(ret)) {
7045 if (put_user_u16(high2lowuid(ruid), arg1)
7046 || put_user_u16(high2lowuid(euid), arg2)
7047 || put_user_u16(high2lowuid(suid), arg3))
7048 goto efault;
7049 }
7050 }
7051 break;
7052 #endif
7053 #ifdef TARGET_NR_setresgid
7054 case TARGET_NR_setresgid:
7055 ret = get_errno(setresgid(low2highgid(arg1),
7056 low2highgid(arg2),
7057 low2highgid(arg3)));
7058 break;
7059 #endif
7060 #ifdef TARGET_NR_getresgid
7061 case TARGET_NR_getresgid:
7062 {
7063 gid_t rgid, egid, sgid;
7064 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7065 if (!is_error(ret)) {
7066 if (put_user_u16(high2lowgid(rgid), arg1)
7067 || put_user_u16(high2lowgid(egid), arg2)
7068 || put_user_u16(high2lowgid(sgid), arg3))
7069 goto efault;
7070 }
7071 }
7072 break;
7073 #endif
7074 case TARGET_NR_chown:
7075 if (!(p = lock_user_string(arg1)))
7076 goto efault;
7077 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7078 unlock_user(p, arg1, 0);
7079 break;
7080 case TARGET_NR_setuid:
7081 ret = get_errno(setuid(low2highuid(arg1)));
7082 break;
7083 case TARGET_NR_setgid:
7084 ret = get_errno(setgid(low2highgid(arg1)));
7085 break;
7086 case TARGET_NR_setfsuid:
7087 ret = get_errno(setfsuid(arg1));
7088 break;
7089 case TARGET_NR_setfsgid:
7090 ret = get_errno(setfsgid(arg1));
7091 break;
7092
7093 #ifdef TARGET_NR_lchown32
7094 case TARGET_NR_lchown32:
7095 if (!(p = lock_user_string(arg1)))
7096 goto efault;
7097 ret = get_errno(lchown(p, arg2, arg3));
7098 unlock_user(p, arg1, 0);
7099 break;
7100 #endif
7101 #ifdef TARGET_NR_getuid32
7102 case TARGET_NR_getuid32:
7103 ret = get_errno(getuid());
7104 break;
7105 #endif
7106
7107 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7108 /* Alpha specific */
7109 case TARGET_NR_getxuid:
7110 {
7111 uid_t euid;
7112             euid = geteuid();
7113             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
7114 }
7115 ret = get_errno(getuid());
7116 break;
7117 #endif
7118 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7119 /* Alpha specific */
7120 case TARGET_NR_getxgid:
7121 {
7122             gid_t egid;
7123             egid = getegid();
7124             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
7125 }
7126 ret = get_errno(getgid());
7127 break;
7128 #endif
7129 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7130 /* Alpha specific */
7131 case TARGET_NR_osf_getsysinfo:
7132 ret = -TARGET_EOPNOTSUPP;
7133 switch (arg1) {
7134 case TARGET_GSI_IEEE_FP_CONTROL:
7135 {
7136 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7137
7138 /* Copied from linux ieee_fpcr_to_swcr. */
7139 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7140 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7141 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7142 | SWCR_TRAP_ENABLE_DZE
7143 | SWCR_TRAP_ENABLE_OVF);
7144 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7145 | SWCR_TRAP_ENABLE_INE);
7146 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7147 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7148
7149 if (put_user_u64 (swcr, arg2))
7150 goto efault;
7151 ret = 0;
7152 }
7153 break;
7154
7155 /* case GSI_IEEE_STATE_AT_SIGNAL:
7156 -- Not implemented in linux kernel.
7157 case GSI_UACPROC:
7158 -- Retrieves current unaligned access state; not much used.
7159 case GSI_PROC_TYPE:
7160 -- Retrieves implver information; surely not used.
7161 case GSI_GET_HWRPB:
7162 -- Grabs a copy of the HWRPB; surely not used.
7163 */
7164 }
7165 break;
7166 #endif
7167 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7168 /* Alpha specific */
7169 case TARGET_NR_osf_setsysinfo:
7170 ret = -TARGET_EOPNOTSUPP;
7171 switch (arg1) {
7172 case TARGET_SSI_IEEE_FP_CONTROL:
7173 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7174 {
7175 uint64_t swcr, fpcr, orig_fpcr;
7176
7177 if (get_user_u64 (swcr, arg2))
7178 goto efault;
7179 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7180 fpcr = orig_fpcr & FPCR_DYN_MASK;
7181
7182 /* Copied from linux ieee_swcr_to_fpcr. */
7183 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7184 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7185 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7186 | SWCR_TRAP_ENABLE_DZE
7187 | SWCR_TRAP_ENABLE_OVF)) << 48;
7188 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7189 | SWCR_TRAP_ENABLE_INE)) << 57;
7190 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7191 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7192
7193 cpu_alpha_store_fpcr (cpu_env, fpcr);
7194 ret = 0;
7195
7196 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7197 /* Old exceptions are not signaled. */
7198 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7199
7200                     /* If any exceptions set by this call are unmasked,
7201                        send a signal. */
7202 /* ??? FIXME */
7203 }
7204 }
7205 break;
7206
7207 /* case SSI_NVPAIRS:
7208 -- Used with SSIN_UACPROC to enable unaligned accesses.
7209 case SSI_IEEE_STATE_AT_SIGNAL:
7210 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7211 -- Not implemented in linux kernel
7212 */
7213 }
7214 break;
7215 #endif
7216 #ifdef TARGET_NR_osf_sigprocmask
7217 /* Alpha specific. */
7218 case TARGET_NR_osf_sigprocmask:
7219 {
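                 /* The OSF/1 variant returns the previous signal mask as the
                    syscall result instead of writing it through a pointer. */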
7220 abi_ulong mask;
7221 int how;
7222 sigset_t set, oldset;
7223
7224 switch(arg1) {
7225 case TARGET_SIG_BLOCK:
7226 how = SIG_BLOCK;
7227 break;
7228 case TARGET_SIG_UNBLOCK:
7229 how = SIG_UNBLOCK;
7230 break;
7231 case TARGET_SIG_SETMASK:
7232 how = SIG_SETMASK;
7233 break;
7234 default:
7235 ret = -TARGET_EINVAL;
7236 goto fail;
7237 }
7238 mask = arg2;
7239 target_to_host_old_sigset(&set, &mask);
7240 sigprocmask(how, &set, &oldset);
7241 host_to_target_old_sigset(&mask, &oldset);
7242 ret = mask;
7243 }
7244 break;
7245 #endif
7246
7247 #ifdef TARGET_NR_getgid32
7248 case TARGET_NR_getgid32:
7249 ret = get_errno(getgid());
7250 break;
7251 #endif
7252 #ifdef TARGET_NR_geteuid32
7253 case TARGET_NR_geteuid32:
7254 ret = get_errno(geteuid());
7255 break;
7256 #endif
7257 #ifdef TARGET_NR_getegid32
7258 case TARGET_NR_getegid32:
7259 ret = get_errno(getegid());
7260 break;
7261 #endif
7262 #ifdef TARGET_NR_setreuid32
7263 case TARGET_NR_setreuid32:
7264 ret = get_errno(setreuid(arg1, arg2));
7265 break;
7266 #endif
7267 #ifdef TARGET_NR_setregid32
7268 case TARGET_NR_setregid32:
7269 ret = get_errno(setregid(arg1, arg2));
7270 break;
7271 #endif
7272 #ifdef TARGET_NR_getgroups32
7273 case TARGET_NR_getgroups32:
7274 {
7275 int gidsetsize = arg1;
7276 uint32_t *target_grouplist;
7277 gid_t *grouplist;
7278 int i;
7279
7280 grouplist = alloca(gidsetsize * sizeof(gid_t));
7281 ret = get_errno(getgroups(gidsetsize, grouplist));
7282 if (gidsetsize == 0)
7283 break;
7284 if (!is_error(ret)) {
7285 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7286 if (!target_grouplist) {
7287 ret = -TARGET_EFAULT;
7288 goto fail;
7289 }
7290 for(i = 0;i < ret; i++)
7291 target_grouplist[i] = tswap32(grouplist[i]);
7292 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7293 }
7294 }
7295 break;
7296 #endif
7297 #ifdef TARGET_NR_setgroups32
7298 case TARGET_NR_setgroups32:
7299 {
7300 int gidsetsize = arg1;
7301 uint32_t *target_grouplist;
7302 gid_t *grouplist;
7303 int i;
7304
7305 grouplist = alloca(gidsetsize * sizeof(gid_t));
7306 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7307 if (!target_grouplist) {
7308 ret = -TARGET_EFAULT;
7309 goto fail;
7310 }
7311 for(i = 0;i < gidsetsize; i++)
7312 grouplist[i] = tswap32(target_grouplist[i]);
7313 unlock_user(target_grouplist, arg2, 0);
7314 ret = get_errno(setgroups(gidsetsize, grouplist));
7315 }
7316 break;
7317 #endif
7318 #ifdef TARGET_NR_fchown32
7319 case TARGET_NR_fchown32:
7320 ret = get_errno(fchown(arg1, arg2, arg3));
7321 break;
7322 #endif
7323 #ifdef TARGET_NR_setresuid32
7324 case TARGET_NR_setresuid32:
7325 ret = get_errno(setresuid(arg1, arg2, arg3));
7326 break;
7327 #endif
7328 #ifdef TARGET_NR_getresuid32
7329 case TARGET_NR_getresuid32:
7330 {
7331 uid_t ruid, euid, suid;
7332 ret = get_errno(getresuid(&ruid, &euid, &suid));
7333 if (!is_error(ret)) {
7334 if (put_user_u32(ruid, arg1)
7335 || put_user_u32(euid, arg2)
7336 || put_user_u32(suid, arg3))
7337 goto efault;
7338 }
7339 }
7340 break;
7341 #endif
7342 #ifdef TARGET_NR_setresgid32
7343 case TARGET_NR_setresgid32:
7344 ret = get_errno(setresgid(arg1, arg2, arg3));
7345 break;
7346 #endif
7347 #ifdef TARGET_NR_getresgid32
7348 case TARGET_NR_getresgid32:
7349 {
7350 gid_t rgid, egid, sgid;
7351 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7352 if (!is_error(ret)) {
7353 if (put_user_u32(rgid, arg1)
7354 || put_user_u32(egid, arg2)
7355 || put_user_u32(sgid, arg3))
7356 goto efault;
7357 }
7358 }
7359 break;
7360 #endif
7361 #ifdef TARGET_NR_chown32
7362 case TARGET_NR_chown32:
7363 if (!(p = lock_user_string(arg1)))
7364 goto efault;
7365 ret = get_errno(chown(p, arg2, arg3));
7366 unlock_user(p, arg1, 0);
7367 break;
7368 #endif
7369 #ifdef TARGET_NR_setuid32
7370 case TARGET_NR_setuid32:
7371 ret = get_errno(setuid(arg1));
7372 break;
7373 #endif
7374 #ifdef TARGET_NR_setgid32
7375 case TARGET_NR_setgid32:
7376 ret = get_errno(setgid(arg1));
7377 break;
7378 #endif
7379 #ifdef TARGET_NR_setfsuid32
7380 case TARGET_NR_setfsuid32:
7381 ret = get_errno(setfsuid(arg1));
7382 break;
7383 #endif
7384 #ifdef TARGET_NR_setfsgid32
7385 case TARGET_NR_setfsgid32:
7386 ret = get_errno(setfsgid(arg1));
7387 break;
7388 #endif
7389
7390 case TARGET_NR_pivot_root:
7391 goto unimplemented;
7392 #ifdef TARGET_NR_mincore
7393 case TARGET_NR_mincore:
7394 {
7395 void *a;
7396 ret = -TARGET_EFAULT;
7397 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7398 goto efault;
7399 if (!(p = lock_user_string(arg3)))
7400 goto mincore_fail;
7401 ret = get_errno(mincore(a, arg2, p));
7402 unlock_user(p, arg3, ret);
7403 mincore_fail:
7404 unlock_user(a, arg1, 0);
7405 }
7406 break;
7407 #endif
7408 #ifdef TARGET_NR_arm_fadvise64_64
7409 case TARGET_NR_arm_fadvise64_64:
7410 {
7411 /*
7412 * arm_fadvise64_64 looks like fadvise64_64 but
7413 * with different argument order
7414 */
7415 abi_long temp;
7416 temp = arg3;
7417 arg3 = arg4;
7418 arg4 = temp;
7419 }
7420 #endif
7421 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7422 #ifdef TARGET_NR_fadvise64_64
7423 case TARGET_NR_fadvise64_64:
7424 #endif
7425 #ifdef TARGET_NR_fadvise64
7426 case TARGET_NR_fadvise64:
7427 #endif
7428 #ifdef TARGET_S390X
7429 switch (arg4) {
7430 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7431 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7432 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7433 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7434 default: break;
7435 }
7436 #endif
7437 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7438 break;
7439 #endif
7440 #ifdef TARGET_NR_madvise
7441 case TARGET_NR_madvise:
7442 /* A straight passthrough may not be safe because qemu sometimes
7443            turns private file-backed mappings into anonymous mappings.
7444 This will break MADV_DONTNEED.
7445 This is a hint, so ignoring and returning success is ok. */
7446 ret = get_errno(0);
7447 break;
7448 #endif
7449 #if TARGET_ABI_BITS == 32
7450 case TARGET_NR_fcntl64:
7451 {
7452 int cmd;
7453 struct flock64 fl;
7454 struct target_flock64 *target_fl;
7455 #ifdef TARGET_ARM
7456 struct target_eabi_flock64 *target_efl;
7457 #endif
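         /* ARM EABI pads struct flock64 differently (l_start is 64-bit
            aligned), so it needs its own conversion path below. */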
7458
7459 cmd = target_to_host_fcntl_cmd(arg2);
7460 if (cmd == -TARGET_EINVAL)
7461 return cmd;
7462
7463 switch(arg2) {
7464 case TARGET_F_GETLK64:
7465 #ifdef TARGET_ARM
7466 if (((CPUARMState *)cpu_env)->eabi) {
7467 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7468 goto efault;
7469 fl.l_type = tswap16(target_efl->l_type);
7470 fl.l_whence = tswap16(target_efl->l_whence);
7471 fl.l_start = tswap64(target_efl->l_start);
7472 fl.l_len = tswap64(target_efl->l_len);
7473 fl.l_pid = tswap32(target_efl->l_pid);
7474 unlock_user_struct(target_efl, arg3, 0);
7475 } else
7476 #endif
7477 {
7478 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7479 goto efault;
7480 fl.l_type = tswap16(target_fl->l_type);
7481 fl.l_whence = tswap16(target_fl->l_whence);
7482 fl.l_start = tswap64(target_fl->l_start);
7483 fl.l_len = tswap64(target_fl->l_len);
7484 fl.l_pid = tswap32(target_fl->l_pid);
7485 unlock_user_struct(target_fl, arg3, 0);
7486 }
7487 ret = get_errno(fcntl(arg1, cmd, &fl));
7488 if (ret == 0) {
7489 #ifdef TARGET_ARM
7490 if (((CPUARMState *)cpu_env)->eabi) {
7491 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7492 goto efault;
7493 target_efl->l_type = tswap16(fl.l_type);
7494 target_efl->l_whence = tswap16(fl.l_whence);
7495 target_efl->l_start = tswap64(fl.l_start);
7496 target_efl->l_len = tswap64(fl.l_len);
7497 target_efl->l_pid = tswap32(fl.l_pid);
7498 unlock_user_struct(target_efl, arg3, 1);
7499 } else
7500 #endif
7501 {
7502 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7503 goto efault;
7504 target_fl->l_type = tswap16(fl.l_type);
7505 target_fl->l_whence = tswap16(fl.l_whence);
7506 target_fl->l_start = tswap64(fl.l_start);
7507 target_fl->l_len = tswap64(fl.l_len);
7508 target_fl->l_pid = tswap32(fl.l_pid);
7509 unlock_user_struct(target_fl, arg3, 1);
7510 }
7511 }
7512 break;
7513
7514 case TARGET_F_SETLK64:
7515 case TARGET_F_SETLKW64:
7516 #ifdef TARGET_ARM
7517 if (((CPUARMState *)cpu_env)->eabi) {
7518 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7519 goto efault;
7520 fl.l_type = tswap16(target_efl->l_type);
7521 fl.l_whence = tswap16(target_efl->l_whence);
7522 fl.l_start = tswap64(target_efl->l_start);
7523 fl.l_len = tswap64(target_efl->l_len);
7524 fl.l_pid = tswap32(target_efl->l_pid);
7525 unlock_user_struct(target_efl, arg3, 0);
7526 } else
7527 #endif
7528 {
7529 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7530 goto efault;
7531 fl.l_type = tswap16(target_fl->l_type);
7532 fl.l_whence = tswap16(target_fl->l_whence);
7533 fl.l_start = tswap64(target_fl->l_start);
7534 fl.l_len = tswap64(target_fl->l_len);
7535 fl.l_pid = tswap32(target_fl->l_pid);
7536 unlock_user_struct(target_fl, arg3, 0);
7537 }
7538 ret = get_errno(fcntl(arg1, cmd, &fl));
7539 break;
7540 default:
7541 ret = do_fcntl(arg1, arg2, arg3);
7542 break;
7543 }
7544 break;
7545 }
7546 #endif
7547 #ifdef TARGET_NR_cacheflush
7548 case TARGET_NR_cacheflush:
7549 /* self-modifying code is handled automatically, so nothing needed */
7550 ret = 0;
7551 break;
7552 #endif
7553 #ifdef TARGET_NR_security
7554 case TARGET_NR_security:
7555 goto unimplemented;
7556 #endif
7557 #ifdef TARGET_NR_getpagesize
7558 case TARGET_NR_getpagesize:
7559 ret = TARGET_PAGE_SIZE;
7560 break;
7561 #endif
7562 case TARGET_NR_gettid:
7563 ret = get_errno(gettid());
7564 break;
7565 #ifdef TARGET_NR_readahead
7566 case TARGET_NR_readahead:
7567 #if TARGET_ABI_BITS == 32
7568 #ifdef TARGET_ARM
7569 if (((CPUARMState *)cpu_env)->eabi)
7570 {
7571 arg2 = arg3;
7572 arg3 = arg4;
7573 arg4 = arg5;
7574 }
7575 #endif
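         /* On 32-bit ABIs the 64-bit offset arrives split across two argument
            registers (ARM EABI also aligns it to an even pair, hence the
            shuffle above); reassemble it before calling readahead(). */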
7576 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7577 #else
7578 ret = get_errno(readahead(arg1, arg2, arg3));
7579 #endif
7580 break;
7581 #endif
7582 #ifdef TARGET_NR_setxattr
7583 case TARGET_NR_setxattr:
7584 case TARGET_NR_lsetxattr:
7585 case TARGET_NR_fsetxattr:
7586 case TARGET_NR_getxattr:
7587 case TARGET_NR_lgetxattr:
7588 case TARGET_NR_fgetxattr:
7589 case TARGET_NR_listxattr:
7590 case TARGET_NR_llistxattr:
7591 case TARGET_NR_flistxattr:
7592 case TARGET_NR_removexattr:
7593 case TARGET_NR_lremovexattr:
7594 case TARGET_NR_fremovexattr:
7595 ret = -TARGET_EOPNOTSUPP;
7596 break;
7597 #endif
7598 #ifdef TARGET_NR_set_thread_area
7599 case TARGET_NR_set_thread_area:
7600 #if defined(TARGET_MIPS)
7601 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7602 ret = 0;
7603 break;
7604 #elif defined(TARGET_CRIS)
7605 if (arg1 & 0xff)
7606 ret = -TARGET_EINVAL;
7607 else {
7608 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7609 ret = 0;
7610 }
7611 break;
7612 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7613 ret = do_set_thread_area(cpu_env, arg1);
7614 break;
7615 #else
7616 goto unimplemented_nowarn;
7617 #endif
7618 #endif
7619 #ifdef TARGET_NR_get_thread_area
7620 case TARGET_NR_get_thread_area:
7621 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7622 ret = do_get_thread_area(cpu_env, arg1);
7623 #else
7624 goto unimplemented_nowarn;
7625 #endif
7626 #endif
7627 #ifdef TARGET_NR_getdomainname
7628 case TARGET_NR_getdomainname:
7629 goto unimplemented_nowarn;
7630 #endif
7631
7632 #ifdef TARGET_NR_clock_gettime
7633 case TARGET_NR_clock_gettime:
7634 {
7635 struct timespec ts;
7636 ret = get_errno(clock_gettime(arg1, &ts));
7637 if (!is_error(ret)) {
7638 host_to_target_timespec(arg2, &ts);
7639 }
7640 break;
7641 }
7642 #endif
7643 #ifdef TARGET_NR_clock_getres
7644 case TARGET_NR_clock_getres:
7645 {
7646 struct timespec ts;
7647 ret = get_errno(clock_getres(arg1, &ts));
7648 if (!is_error(ret)) {
7649 host_to_target_timespec(arg2, &ts);
7650 }
7651 break;
7652 }
7653 #endif
7654 #ifdef TARGET_NR_clock_nanosleep
7655 case TARGET_NR_clock_nanosleep:
7656 {
7657 struct timespec ts;
7658 target_to_host_timespec(&ts, arg3);
7659 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7660 if (arg4)
7661 host_to_target_timespec(arg4, &ts);
7662 break;
7663 }
7664 #endif
7665
7666 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7667 case TARGET_NR_set_tid_address:
7668 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7669 break;
7670 #endif
7671
7672 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7673 case TARGET_NR_tkill:
7674 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7675 break;
7676 #endif
7677
7678 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7679 case TARGET_NR_tgkill:
7680 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7681 target_to_host_signal(arg3)));
7682 break;
7683 #endif
7684
7685 #ifdef TARGET_NR_set_robust_list
7686 case TARGET_NR_set_robust_list:
7687 goto unimplemented_nowarn;
7688 #endif
7689
7690 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7691 case TARGET_NR_utimensat:
7692 {
7693 struct timespec *tsp, ts[2];
7694 if (!arg3) {
7695 tsp = NULL;
7696 } else {
7697 target_to_host_timespec(ts, arg3);
7698 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
7699 tsp = ts;
7700 }
7701 if (!arg2)
7702 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7703 else {
7704 if (!(p = lock_user_string(arg2))) {
7705 ret = -TARGET_EFAULT;
7706 goto fail;
7707 }
7708 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7709 unlock_user(p, arg2, 0);
7710 }
7711 }
7712 break;
7713 #endif
7714 #if defined(CONFIG_USE_NPTL)
7715 case TARGET_NR_futex:
7716 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7717 break;
7718 #endif
7719 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7720 case TARGET_NR_inotify_init:
7721 ret = get_errno(sys_inotify_init());
7722 break;
7723 #endif
7724 #ifdef CONFIG_INOTIFY1
7725 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7726 case TARGET_NR_inotify_init1:
7727 ret = get_errno(sys_inotify_init1(arg1));
7728 break;
7729 #endif
7730 #endif
7731 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7732 case TARGET_NR_inotify_add_watch:
7733 p = lock_user_string(arg2);
7734 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7735 unlock_user(p, arg2, 0);
7736 break;
7737 #endif
7738 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7739 case TARGET_NR_inotify_rm_watch:
7740 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7741 break;
7742 #endif
7743
7744 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7745 case TARGET_NR_mq_open:
7746 {
7747 struct mq_attr posix_mq_attr;
7748
7749 p = lock_user_string(arg1 - 1);
7750 if (arg4 != 0)
7751 copy_from_user_mq_attr (&posix_mq_attr, arg4);
7752 ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL));
7753 unlock_user (p, arg1, 0);
7754 }
7755 break;
7756
7757 case TARGET_NR_mq_unlink:
7758 p = lock_user_string(arg1 - 1);
7759 ret = get_errno(mq_unlink(p));
7760 unlock_user (p, arg1, 0);
7761 break;
7762
7763 case TARGET_NR_mq_timedsend:
7764 {
7765 struct timespec ts;
7766
7767 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7768 if (arg5 != 0) {
7769 target_to_host_timespec(&ts, arg5);
7770 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7771 host_to_target_timespec(arg5, &ts);
7772 }
7773 else
7774 ret = get_errno(mq_send(arg1, p, arg3, arg4));
7775 unlock_user (p, arg2, arg3);
7776 }
7777 break;
7778
7779 case TARGET_NR_mq_timedreceive:
7780 {
7781 struct timespec ts;
7782 unsigned int prio;
7783
7784 p = lock_user (VERIFY_WRITE, arg2, arg3, 1);
7785 if (arg5 != 0) {
7786 target_to_host_timespec(&ts, arg5);
7787 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
7788 host_to_target_timespec(arg5, &ts);
7789 }
7790 else
7791 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
7792 unlock_user (p, arg2, arg3);
7793 if (arg4 != 0)
7794 put_user_u32(prio, arg4);
7795 }
7796 break;
7797
7798 /* Not implemented for now... */
7799 /* case TARGET_NR_mq_notify: */
7800 /* break; */
7801
7802 case TARGET_NR_mq_getsetattr:
7803 {
7804 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
7805 ret = 0;
7806 if (arg3 != 0) {
7807 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
7808 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
7809 }
7810 if (arg2 != 0) {
7811 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
7812 ret |= get_errno(mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out));
7813 }
7814
7815 }
7816 break;
7817 #endif
7818
7819 #ifdef CONFIG_SPLICE
7820 #ifdef TARGET_NR_tee
7821 case TARGET_NR_tee:
7822 {
7823 ret = get_errno(tee(arg1, arg2, arg3, arg4));
7824 }
7825 break;
7826 #endif
7827 #ifdef TARGET_NR_splice
7828 case TARGET_NR_splice:
7829 {
7830 loff_t loff_in, loff_out;
7831 loff_t *ploff_in = NULL, *ploff_out = NULL;
7832 if (arg2) {
7833 get_user_u64(loff_in, arg2);
7834 ploff_in = &loff_in;
7835 }
7836 if (arg4) {
7837 get_user_u64(loff_out, arg4);
7838 ploff_out = &loff_out;
7839 }
7840 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
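/* Note: the host call may advance *ploff_in / *ploff_out, but the
 * updated offsets are not written back to guest memory here, so the
 * guest-visible offset values stay unchanged. */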
7841 }
7842 break;
7843 #endif
7844 #ifdef TARGET_NR_vmsplice
7845 case TARGET_NR_vmsplice:
7846 {
7847 int count = arg3;
7848 struct iovec *vec;
7849
7850 vec = alloca(count * sizeof(struct iovec));
7851 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7852 goto efault;
7853 ret = get_errno(vmsplice(arg1, vec, count, arg4));
7854 unlock_iovec(vec, arg2, count, 0);
7855 }
7856 break;
7857 #endif
7858 #endif /* CONFIG_SPLICE */
7859 #ifdef CONFIG_EVENTFD
7860 #if defined(TARGET_NR_eventfd)
7861 case TARGET_NR_eventfd:
7862 ret = get_errno(eventfd(arg1, 0));
7863 break;
7864 #endif
7865 #if defined(TARGET_NR_eventfd2)
7866 case TARGET_NR_eventfd2:
7867 ret = get_errno(eventfd(arg1, arg2));
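/* The flags in arg2 are passed through untranslated; this assumes the
 * target's EFD_* values match the host's. */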
7868 break;
7869 #endif
7870 #endif /* CONFIG_EVENTFD */
7871 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7872 case TARGET_NR_fallocate:
7873 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
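/* arg3/arg4 are passed through as-is; 32-bit targets that split the
 * 64-bit offset and length across register pairs are not handled
 * here. */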
7874 break;
7875 #endif
7876 #if defined(CONFIG_SYNC_FILE_RANGE)
7877 #if defined(TARGET_NR_sync_file_range)
7878 case TARGET_NR_sync_file_range:
7879 #if TARGET_ABI_BITS == 32
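/* On 32-bit ABIs the 64-bit offset and nbytes arguments arrive as
 * register pairs.  MIPS o32 additionally aligns 64-bit arguments to
 * even register pairs, so a pad slot follows the fd and the values
 * are taken from arg3..arg6 rather than arg2..arg5. */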
7880 #if defined(TARGET_MIPS)
7881 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7882 target_offset64(arg5, arg6), arg7));
7883 #else
7884 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
7885 target_offset64(arg4, arg5), arg6));
7886 #endif /* !TARGET_MIPS */
7887 #else
7888 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
7889 #endif
7890 break;
7891 #endif
7892 #if defined(TARGET_NR_sync_file_range2)
7893 case TARGET_NR_sync_file_range2:
7894 /* This is like sync_file_range but the arguments are reordered */
7895 #if TARGET_ABI_BITS == 32
7896 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7897 target_offset64(arg5, arg6), arg2));
7898 #else
7899 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
7900 #endif
7901 break;
7902 #endif
7903 #endif
7904 #if defined(CONFIG_EPOLL)
7905 #if defined(TARGET_NR_epoll_create)
7906 case TARGET_NR_epoll_create:
7907 ret = get_errno(epoll_create(arg1));
7908 break;
7909 #endif
7910 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7911 case TARGET_NR_epoll_create1:
7912 ret = get_errno(epoll_create1(arg1));
7913 break;
7914 #endif
7915 #if defined(TARGET_NR_epoll_ctl)
7916 case TARGET_NR_epoll_ctl:
7917 {
7918 struct epoll_event ep;
7919 struct epoll_event *epp = 0;
7920 if (arg4) {
7921 struct target_epoll_event *target_ep;
7922 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
7923 goto efault;
7924 }
7925 ep.events = tswap32(target_ep->events);
7926 /* The epoll_data_t union is just opaque data to the kernel,
7927 * so we transfer all 64 bits across and need not worry what
7928 * actual data type it is.
7929 */
7930 ep.data.u64 = tswap64(target_ep->data.u64);
7931 unlock_user_struct(target_ep, arg4, 0);
7932 epp = &ep;
7933 }
7934 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
7935 break;
7936 }
7937 #endif
7938
7939 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7940 #define IMPLEMENT_EPOLL_PWAIT
7941 #endif
7942 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7943 #if defined(TARGET_NR_epoll_wait)
7944 case TARGET_NR_epoll_wait:
7945 #endif
7946 #if defined(IMPLEMENT_EPOLL_PWAIT)
7947 case TARGET_NR_epoll_pwait:
7948 #endif
7949 {
7950 struct target_epoll_event *target_ep;
7951 struct epoll_event *ep;
7952 int epfd = arg1;
7953 int maxevents = arg3;
7954 int timeout = arg4;
7955
7956 target_ep = lock_user(VERIFY_WRITE, arg2,
7957 maxevents * sizeof(struct target_epoll_event), 1);
7958 if (!target_ep) {
7959 goto efault;
7960 }
7961
7962 ep = alloca(maxevents * sizeof(struct epoll_event));
7963
7964 switch (num) {
7965 #if defined(IMPLEMENT_EPOLL_PWAIT)
7966 case TARGET_NR_epoll_pwait:
7967 {
7968 target_sigset_t *target_set;
7969 sigset_t _set, *set = &_set;
7970
7971 if (arg5) {
7972 target_set = lock_user(VERIFY_READ, arg5,
7973 sizeof(target_sigset_t), 1);
7974 if (!target_set) {
7975 unlock_user(target_ep, arg2, 0);
7976 goto efault;
7977 }
7978 target_to_host_sigset(set, target_set);
7979 unlock_user(target_set, arg5, 0);
7980 } else {
7981 set = NULL;
7982 }
7983
7984 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
7985 break;
7986 }
7987 #endif
7988 #if defined(TARGET_NR_epoll_wait)
7989 case TARGET_NR_epoll_wait:
7990 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
7991 break;
7992 #endif
7993 default:
7994 ret = -TARGET_ENOSYS;
7995 }
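/* On success, ret is the number of ready events; convert only those
 * entries back to guest byte order before unlocking the array. */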
7996 if (!is_error(ret)) {
7997 int i;
7998 for (i = 0; i < ret; i++) {
7999 target_ep[i].events = tswap32(ep[i].events);
8000 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8001 }
8002 }
8003 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8004 break;
8005 }
8006 #endif
8007 #endif
8008 #ifdef TARGET_NR_prlimit64
8009 case TARGET_NR_prlimit64:
8010 {
8011 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8012 struct target_rlimit64 *target_rnew, *target_rold;
8013 struct host_rlimit64 rnew, rold, *rnewp = 0;
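/* Both the target and host structs use two 64-bit fields, so a
 * tswap64() per field is all the conversion needed; RLIM64_INFINITY
 * (all bits set) is unchanged by the byte swap. */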
8014 if (arg3) {
8015 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8016 goto efault;
8017 }
8018 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8019 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8020 unlock_user_struct(target_rnew, arg3, 0);
8021 rnewp = &rnew;
8022 }
8023
8024 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8025 if (!is_error(ret) && arg4) {
8026 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8027 goto efault;
8028 }
8029 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8030 target_rold->rlim_max = tswap64(rold.rlim_max);
8031 unlock_user_struct(target_rold, arg4, 1);
8032 }
8033 break;
8034 }
8035 #endif
8036 default:
8037 unimplemented:
8038 gemu_log("qemu: Unsupported syscall: %d\n", num);
8039 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8040 unimplemented_nowarn:
8041 #endif
8042 ret = -TARGET_ENOSYS;
8043 break;
8044 }
8045 fail:
8046 #ifdef DEBUG
8047 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8048 #endif
8049 if(do_strace)
8050 print_syscall_ret(num, ret);
8051 return ret;
8052 efault:
8053 ret = -TARGET_EFAULT;
8054 goto fail;
8055 }